]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.39.4-201108162115.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.39.4-201108162115.patch
CommitLineData
ea04bca2
PK
1diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2--- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3+++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20+++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40--- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41+++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53+++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86--- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87+++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245--- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246+++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275--- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276+++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286--- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287+++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344--- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345+++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358--- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359+++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382--- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383+++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384@@ -258,6 +258,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404--- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405+++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430--- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431+++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456--- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457+++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513+++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525+++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536--- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537+++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587--- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588+++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639--- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640+++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658--- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659+++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671--- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672+++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715--- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716+++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726--- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727+++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757--- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758+++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774--- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775+++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804--- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805+++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816--- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817+++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837--- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838+++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928--- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929+++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964+++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975--- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976+++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028+++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039--- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040+++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062--- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063+++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085--- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086+++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109--- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110+++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121--- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122+++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133+++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150+++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166--- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167+++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185--- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186+++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187@@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1188 do_color_align = 0;
1189 if (filp || (flags & MAP_SHARED))
1190 do_color_align = 1;
1191+
1192+#ifdef CONFIG_PAX_RANDMMAP
1193+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1194+#endif
1195+
1196 if (addr) {
1197 if (do_color_align)
1198 addr = COLOUR_ALIGN(addr, pgoff);
1199 else
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202- if (task_size - len >= addr &&
1203- (!vmm || addr + len <= vmm->vm_start))
1204+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1205 return addr;
1206 }
1207 addr = current->mm->mmap_base;
1208@@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1211 return -ENOMEM;
1212- if (!vmm || addr + len <= vmm->vm_start)
1213+ if (check_heap_stack_gap(vmm, addr, len))
1214 return addr;
1215 addr = vmm->vm_end;
1216 if (do_color_align)
1217@@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1219 }
1220
1221-static inline unsigned long brk_rnd(void)
1222-{
1223- unsigned long rnd = get_random_int();
1224-
1225- rnd = rnd << PAGE_SHIFT;
1226- /* 8MB for 32bit, 256MB for 64bit */
1227- if (TASK_IS_32BIT_ADDR)
1228- rnd = rnd & 0x7ffffful;
1229- else
1230- rnd = rnd & 0xffffffful;
1231-
1232- return rnd;
1233-}
1234-
1235-unsigned long arch_randomize_brk(struct mm_struct *mm)
1236-{
1237- unsigned long base = mm->brk;
1238- unsigned long ret;
1239-
1240- ret = PAGE_ALIGN(base + brk_rnd());
1241-
1242- if (ret < mm->brk)
1243- return mm->brk;
1244-
1245- return ret;
1246-}
1247-
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1250 fd, off_t, offset)
1251diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252--- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253+++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1254@@ -28,6 +28,23 @@
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1257
1258+#ifdef CONFIG_PAX_PAGEEXEC
1259+void pax_report_insns(void *pc, void *sp)
1260+{
1261+ unsigned long i;
1262+
1263+ printk(KERN_ERR "PAX: bytes at PC: ");
1264+ for (i = 0; i < 5; i++) {
1265+ unsigned int c;
1266+ if (get_user(c, (unsigned int *)pc+i))
1267+ printk(KERN_CONT "???????? ");
1268+ else
1269+ printk(KERN_CONT "%08x ", c);
1270+ }
1271+ printk("\n");
1272+}
1273+#endif
1274+
1275 /*
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279--- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280+++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1282
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1284
1285+#ifdef CONFIG_PAX_ASLR
1286+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1287+
1288+#define PAX_DELTA_MMAP_LEN 16
1289+#define PAX_DELTA_STACK_LEN 16
1290+#endif
1291+
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296--- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297+++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298@@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1302+
1303+#ifdef CONFIG_PAX_PAGEEXEC
1304+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1307+#else
1308+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309+# define PAGE_COPY_NOEXEC PAGE_COPY
1310+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1311+#endif
1312+
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317--- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318+++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1319@@ -96,16 +96,38 @@
1320
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323+static inline int in_init_rx(struct module *me, void *loc)
1324+{
1325+ return (loc >= me->module_init_rx &&
1326+ loc < (me->module_init_rx + me->init_size_rx));
1327+}
1328+
1329+static inline int in_init_rw(struct module *me, void *loc)
1330+{
1331+ return (loc >= me->module_init_rw &&
1332+ loc < (me->module_init_rw + me->init_size_rw));
1333+}
1334+
1335 static inline int in_init(struct module *me, void *loc)
1336 {
1337- return (loc >= me->module_init &&
1338- loc <= (me->module_init + me->init_size));
1339+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1340+}
1341+
1342+static inline int in_core_rx(struct module *me, void *loc)
1343+{
1344+ return (loc >= me->module_core_rx &&
1345+ loc < (me->module_core_rx + me->core_size_rx));
1346+}
1347+
1348+static inline int in_core_rw(struct module *me, void *loc)
1349+{
1350+ return (loc >= me->module_core_rw &&
1351+ loc < (me->module_core_rw + me->core_size_rw));
1352 }
1353
1354 static inline int in_core(struct module *me, void *loc)
1355 {
1356- return (loc >= me->module_core &&
1357- loc <= (me->module_core + me->core_size));
1358+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1359 }
1360
1361 static inline int in_local(struct module *me, void *loc)
1362@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1363 }
1364
1365 /* align things a bit */
1366- me->core_size = ALIGN(me->core_size, 16);
1367- me->arch.got_offset = me->core_size;
1368- me->core_size += gots * sizeof(struct got_entry);
1369-
1370- me->core_size = ALIGN(me->core_size, 16);
1371- me->arch.fdesc_offset = me->core_size;
1372- me->core_size += fdescs * sizeof(Elf_Fdesc);
1373+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374+ me->arch.got_offset = me->core_size_rw;
1375+ me->core_size_rw += gots * sizeof(struct got_entry);
1376+
1377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378+ me->arch.fdesc_offset = me->core_size_rw;
1379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1380
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1384
1385 BUG_ON(value == 0);
1386
1387- got = me->module_core + me->arch.got_offset;
1388+ got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1391 goto out;
1392@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1393 #ifdef CONFIG_64BIT
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1395 {
1396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1398
1399 if (!value) {
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1402
1403 /* Create new one */
1404 fdesc->addr = value;
1405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1408 }
1409 #endif /* CONFIG_64BIT */
1410@@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1411
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1416
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420--- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421+++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1425 return -ENOMEM;
1426- if (!vma || addr + len <= vma->vm_start)
1427+ if (check_heap_stack_gap(vma, addr, len))
1428 return addr;
1429 addr = vma->vm_end;
1430 }
1431@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1434 return -ENOMEM;
1435- if (!vma || addr + len <= vma->vm_start)
1436+ if (check_heap_stack_gap(vma, addr, len))
1437 return addr;
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1442 return addr;
1443 if (!addr)
1444- addr = TASK_UNMAPPED_BASE;
1445+ addr = current->mm->mmap_base;
1446
1447 if (filp) {
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450--- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451+++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1453
1454 down_read(&current->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456- if (vma && (regs->iaoq[0] >= vma->vm_start)
1457- && (vma->vm_flags & VM_EXEC)) {
1458-
1459+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1462
1463diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464--- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465+++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1466@@ -15,6 +15,7 @@
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470+#include <linux/unistd.h>
1471
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1477 {
1478- if (code == 6 || code == 16)
1479+ if (code == 6 || code == 7 || code == 16)
1480 return VM_EXEC;
1481
1482 switch (inst & 0xf0000000) {
1483@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1484 }
1485 #endif
1486
1487+#ifdef CONFIG_PAX_PAGEEXEC
1488+/*
1489+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1490+ *
1491+ * returns 1 when task should be killed
1492+ * 2 when rt_sigreturn trampoline was detected
1493+ * 3 when unpatched PLT trampoline was detected
1494+ */
1495+static int pax_handle_fetch_fault(struct pt_regs *regs)
1496+{
1497+
1498+#ifdef CONFIG_PAX_EMUPLT
1499+ int err;
1500+
1501+ do { /* PaX: unpatched PLT emulation */
1502+ unsigned int bl, depwi;
1503+
1504+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1506+
1507+ if (err)
1508+ break;
1509+
1510+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1512+
1513+ err = get_user(ldw, (unsigned int *)addr);
1514+ err |= get_user(bv, (unsigned int *)(addr+4));
1515+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1516+
1517+ if (err)
1518+ break;
1519+
1520+ if (ldw == 0x0E801096U &&
1521+ bv == 0xEAC0C000U &&
1522+ ldw2 == 0x0E881095U)
1523+ {
1524+ unsigned int resolver, map;
1525+
1526+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1528+ if (err)
1529+ break;
1530+
1531+ regs->gr[20] = instruction_pointer(regs)+8;
1532+ regs->gr[21] = map;
1533+ regs->gr[22] = resolver;
1534+ regs->iaoq[0] = resolver | 3UL;
1535+ regs->iaoq[1] = regs->iaoq[0] + 4;
1536+ return 3;
1537+ }
1538+ }
1539+ } while (0);
1540+#endif
1541+
1542+#ifdef CONFIG_PAX_EMUTRAMP
1543+
1544+#ifndef CONFIG_PAX_EMUSIGRT
1545+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1546+ return 1;
1547+#endif
1548+
1549+ do { /* PaX: rt_sigreturn emulation */
1550+ unsigned int ldi1, ldi2, bel, nop;
1551+
1552+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1556+
1557+ if (err)
1558+ break;
1559+
1560+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561+ ldi2 == 0x3414015AU &&
1562+ bel == 0xE4008200U &&
1563+ nop == 0x08000240U)
1564+ {
1565+ regs->gr[25] = (ldi1 & 2) >> 1;
1566+ regs->gr[20] = __NR_rt_sigreturn;
1567+ regs->gr[31] = regs->iaoq[1] + 16;
1568+ regs->sr[0] = regs->iasq[1];
1569+ regs->iaoq[0] = 0x100UL;
1570+ regs->iaoq[1] = regs->iaoq[0] + 4;
1571+ regs->iasq[0] = regs->sr[2];
1572+ regs->iasq[1] = regs->sr[2];
1573+ return 2;
1574+ }
1575+ } while (0);
1576+#endif
1577+
1578+ return 1;
1579+}
1580+
1581+void pax_report_insns(void *pc, void *sp)
1582+{
1583+ unsigned long i;
1584+
1585+ printk(KERN_ERR "PAX: bytes at PC: ");
1586+ for (i = 0; i < 5; i++) {
1587+ unsigned int c;
1588+ if (get_user(c, (unsigned int *)pc+i))
1589+ printk(KERN_CONT "???????? ");
1590+ else
1591+ printk(KERN_CONT "%08x ", c);
1592+ }
1593+ printk("\n");
1594+}
1595+#endif
1596+
1597 int fixup_exception(struct pt_regs *regs)
1598 {
1599 const struct exception_table_entry *fix;
1600@@ -192,8 +303,33 @@ good_area:
1601
1602 acc_type = parisc_acctyp(code,regs->iir);
1603
1604- if ((vma->vm_flags & acc_type) != acc_type)
1605+ if ((vma->vm_flags & acc_type) != acc_type) {
1606+
1607+#ifdef CONFIG_PAX_PAGEEXEC
1608+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609+ (address & ~3UL) == instruction_pointer(regs))
1610+ {
1611+ up_read(&mm->mmap_sem);
1612+ switch (pax_handle_fetch_fault(regs)) {
1613+
1614+#ifdef CONFIG_PAX_EMUPLT
1615+ case 3:
1616+ return;
1617+#endif
1618+
1619+#ifdef CONFIG_PAX_EMUTRAMP
1620+ case 2:
1621+ return;
1622+#endif
1623+
1624+ }
1625+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626+ do_group_exit(SIGKILL);
1627+ }
1628+#endif
1629+
1630 goto bad_area;
1631+ }
1632
1633 /*
1634 * If for any reason at all we couldn't handle the fault, make
1635diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636--- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637+++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1641
1642-extern unsigned long randomize_et_dyn(unsigned long base);
1643-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644+#define ELF_ET_DYN_BASE (0x20000000)
1645+
1646+#ifdef CONFIG_PAX_ASLR
1647+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1648+
1649+#ifdef __powerpc64__
1650+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1652+#else
1653+#define PAX_DELTA_MMAP_LEN 15
1654+#define PAX_DELTA_STACK_LEN 15
1655+#endif
1656+#endif
1657
1658 /*
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1663
1664-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665-#define arch_randomize_brk arch_randomize_brk
1666-
1667 #endif /* __KERNEL__ */
1668
1669 /*
1670diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671--- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672+++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673@@ -27,6 +27,7 @@ enum km_type {
1674 KM_PPC_SYNC_PAGE,
1675 KM_PPC_SYNC_ICACHE,
1676 KM_KDB,
1677+ KM_CLEARPAGE,
1678 KM_TYPE_NR
1679 };
1680
1681diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682--- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683+++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684@@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1687 */
1688-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690+#define VM_STACK_DEFAULT_FLAGS32 \
1691+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1693
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1696
1697+#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1701+#endif
1702
1703 #include <asm-generic/getorder.h>
1704
1705diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706--- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707+++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1711 */
1712-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714+#define VM_DATA_DEFAULT_FLAGS32 \
1715+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1717
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1722 #endif
1723
1724+#define ktla_ktva(addr) (addr)
1725+#define ktva_ktla(addr) (addr)
1726+
1727 #ifndef __ASSEMBLY__
1728
1729 #undef STRICT_MM_TYPECHECKS
1730diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731--- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732+++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1733@@ -2,6 +2,7 @@
1734 #define _ASM_POWERPC_PGTABLE_H
1735 #ifdef __KERNEL__
1736
1737+#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742--- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743+++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1744@@ -21,6 +21,7 @@
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748+#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753--- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754+++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1755@@ -201,6 +201,7 @@
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764--- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765+++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766@@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1768 #endif
1769
1770-extern unsigned long arch_align_stack(unsigned long sp);
1771+#define arch_align_stack(x) ((x) & ~0xfUL)
1772
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776--- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777+++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1778@@ -13,6 +13,8 @@
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1781
1782+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1783+
1784 /*
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787@@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1790
1791-#ifndef __powerpc64__
1792-
1793-static inline unsigned long copy_from_user(void *to,
1794- const void __user *from, unsigned long n)
1795-{
1796- unsigned long over;
1797-
1798- if (access_ok(VERIFY_READ, from, n))
1799- return __copy_tofrom_user((__force void __user *)to, from, n);
1800- if ((unsigned long)from < TASK_SIZE) {
1801- over = (unsigned long)from + n - TASK_SIZE;
1802- return __copy_tofrom_user((__force void __user *)to, from,
1803- n - over) + over;
1804- }
1805- return n;
1806-}
1807-
1808-static inline unsigned long copy_to_user(void __user *to,
1809- const void *from, unsigned long n)
1810-{
1811- unsigned long over;
1812-
1813- if (access_ok(VERIFY_WRITE, to, n))
1814- return __copy_tofrom_user(to, (__force void __user *)from, n);
1815- if ((unsigned long)to < TASK_SIZE) {
1816- over = (unsigned long)to + n - TASK_SIZE;
1817- return __copy_tofrom_user(to, (__force void __user *)from,
1818- n - over) + over;
1819- }
1820- return n;
1821-}
1822-
1823-#else /* __powerpc64__ */
1824-
1825-#define __copy_in_user(to, from, size) \
1826- __copy_tofrom_user((to), (from), (size))
1827-
1828-extern unsigned long copy_from_user(void *to, const void __user *from,
1829- unsigned long n);
1830-extern unsigned long copy_to_user(void __user *to, const void *from,
1831- unsigned long n);
1832-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1833- unsigned long n);
1834-
1835-#endif /* __powerpc64__ */
1836-
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1839 {
1840@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1841 if (ret == 0)
1842 return 0;
1843 }
1844+
1845+ if (!__builtin_constant_p(n))
1846+ check_object_size(to, n, false);
1847+
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1849 }
1850
1851@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1852 if (ret == 0)
1853 return 0;
1854 }
1855+
1856+ if (!__builtin_constant_p(n))
1857+ check_object_size(from, n, true);
1858+
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1860 }
1861
1862@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1864 }
1865
1866+#ifndef __powerpc64__
1867+
1868+static inline unsigned long __must_check copy_from_user(void *to,
1869+ const void __user *from, unsigned long n)
1870+{
1871+ unsigned long over;
1872+
1873+ if ((long)n < 0)
1874+ return n;
1875+
1876+ if (access_ok(VERIFY_READ, from, n)) {
1877+ if (!__builtin_constant_p(n))
1878+ check_object_size(to, n, false);
1879+ return __copy_tofrom_user((__force void __user *)to, from, n);
1880+ }
1881+ if ((unsigned long)from < TASK_SIZE) {
1882+ over = (unsigned long)from + n - TASK_SIZE;
1883+ if (!__builtin_constant_p(n - over))
1884+ check_object_size(to, n - over, false);
1885+ return __copy_tofrom_user((__force void __user *)to, from,
1886+ n - over) + over;
1887+ }
1888+ return n;
1889+}
1890+
1891+static inline unsigned long __must_check copy_to_user(void __user *to,
1892+ const void *from, unsigned long n)
1893+{
1894+ unsigned long over;
1895+
1896+ if ((long)n < 0)
1897+ return n;
1898+
1899+ if (access_ok(VERIFY_WRITE, to, n)) {
1900+ if (!__builtin_constant_p(n))
1901+ check_object_size(from, n, true);
1902+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1903+ }
1904+ if ((unsigned long)to < TASK_SIZE) {
1905+ over = (unsigned long)to + n - TASK_SIZE;
1906+ if (!__builtin_constant_p(n))
1907+ check_object_size(from, n - over, true);
1908+ return __copy_tofrom_user(to, (__force void __user *)from,
1909+ n - over) + over;
1910+ }
1911+ return n;
1912+}
1913+
1914+#else /* __powerpc64__ */
1915+
1916+#define __copy_in_user(to, from, size) \
1917+ __copy_tofrom_user((to), (from), (size))
1918+
1919+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1920+{
1921+ if ((long)n < 0 || n > INT_MAX)
1922+ return n;
1923+
1924+ if (!__builtin_constant_p(n))
1925+ check_object_size(to, n, false);
1926+
1927+ if (likely(access_ok(VERIFY_READ, from, n)))
1928+ n = __copy_from_user(to, from, n);
1929+ else
1930+ memset(to, 0, n);
1931+ return n;
1932+}
1933+
1934+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1935+{
1936+ if ((long)n < 0 || n > INT_MAX)
1937+ return n;
1938+
1939+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940+ if (!__builtin_constant_p(n))
1941+ check_object_size(from, n, true);
1942+ n = __copy_to_user(to, from, n);
1943+ }
1944+ return n;
1945+}
1946+
1947+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1948+ unsigned long n);
1949+
1950+#endif /* __powerpc64__ */
1951+
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1953
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956--- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957+++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958@@ -495,6 +495,7 @@ storage_fault_common:
1959 std r14,_DAR(r1)
1960 std r15,_DSISR(r1)
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1962+ bl .save_nvgprs
1963 mr r4,r14
1964 mr r5,r15
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966@@ -504,8 +505,7 @@ storage_fault_common:
1967 cmpdi r3,0
1968 bne- 1f
1969 b .ret_from_except_lite
1970-1: bl .save_nvgprs
1971- mr r5,r3
1972+1: mr r5,r3
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1974 ld r4,_DAR(r1)
1975 bl .bad_page_fault
1976diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977--- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978+++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979@@ -848,10 +848,10 @@ handle_page_fault:
1980 11: ld r4,_DAR(r1)
1981 ld r5,_DSISR(r1)
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983+ bl .save_nvgprs
1984 bl .do_page_fault
1985 cmpdi r3,0
1986 beq+ 13f
1987- bl .save_nvgprs
1988 mr r5,r3
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990 lwz r4,_DAR(r1)
1991diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992--- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993+++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1996 }
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998- printk("Module doesn't contain .plt or .init.plt sections.\n");
1999+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2000 return -ENOEXEC;
2001 }
2002
2003@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2004
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007- if (location >= mod->module_core
2008- && location < mod->module_core + mod->core_size)
2009+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2012- else
2013+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2016+ else {
2017+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2018+ return ~0UL;
2019+ }
2020
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024--- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025+++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2026@@ -31,11 +31,24 @@
2027
2028 LIST_HEAD(module_bug_list);
2029
2030+#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2032 {
2033 if (size == 0)
2034 return NULL;
2035
2036+ return vmalloc(size);
2037+}
2038+
2039+void *module_alloc_exec(unsigned long size)
2040+#else
2041+void *module_alloc(unsigned long size)
2042+#endif
2043+
2044+{
2045+ if (size == 0)
2046+ return NULL;
2047+
2048 return vmalloc_exec(size);
2049 }
2050
2051@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2053 }
2054
2055+#ifdef CONFIG_PAX_KERNEXEC
2056+void module_free_exec(struct module *mod, void *module_region)
2057+{
2058+ module_free(mod, module_region);
2059+}
2060+#endif
2061+
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2064 const char *name)
2065diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066--- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067+++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068@@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best change of getting the
2070 * above info out without failing
2071 */
2072- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2076 #endif
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079@@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2080 newsp = stack[0];
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2087- printk(" (%pS)",
2088+ printk(" (%pA)",
2089 (void *)current->ret_stack[curr_frame].ret);
2090 curr_frame--;
2091 }
2092@@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2095 lr = regs->link;
2096- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2099 firstframe = 1;
2100 }
2101@@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2102 }
2103
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2105-
2106-unsigned long arch_align_stack(unsigned long sp)
2107-{
2108- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109- sp -= get_random_int() & ~PAGE_MASK;
2110- return sp & ~0xf;
2111-}
2112-
2113-static inline unsigned long brk_rnd(void)
2114-{
2115- unsigned long rnd = 0;
2116-
2117- /* 8MB for 32bit, 1GB for 64bit */
2118- if (is_32bit_task())
2119- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2120- else
2121- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2122-
2123- return rnd << PAGE_SHIFT;
2124-}
2125-
2126-unsigned long arch_randomize_brk(struct mm_struct *mm)
2127-{
2128- unsigned long base = mm->brk;
2129- unsigned long ret;
2130-
2131-#ifdef CONFIG_PPC_STD_MMU_64
2132- /*
2133- * If we are using 1TB segments and we are allowed to randomise
2134- * the heap, we can put it above 1TB so it is backed by a 1TB
2135- * segment. Otherwise the heap will be in the bottom 1TB
2136- * which always uses 256MB segments and this may result in a
2137- * performance penalty.
2138- */
2139- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2141-#endif
2142-
2143- ret = PAGE_ALIGN(base + brk_rnd());
2144-
2145- if (ret < mm->brk)
2146- return mm->brk;
2147-
2148- return ret;
2149-}
2150-
2151-unsigned long randomize_et_dyn(unsigned long base)
2152-{
2153- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2154-
2155- if (ret < base)
2156- return base;
2157-
2158- return ret;
2159-}
2160diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161--- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162+++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163@@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2166 addr = frame;
2167- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2170 goto badframe;
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173--- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174+++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2177
2178 /* Set up to return from userspace. */
2179- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2182 } else {
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185--- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186+++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187@@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2189 #endif
2190
2191+extern void gr_handle_kernel_exploit(void);
2192+
2193 int die(const char *str, struct pt_regs *regs, long err)
2194 {
2195 static struct {
2196@@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2197 if (panic_on_oops)
2198 panic("Fatal exception");
2199
2200+ gr_handle_kernel_exploit();
2201+
2202 oops_exit();
2203 do_exit(err);
2204
2205diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206--- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207+++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2208@@ -36,6 +36,7 @@
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212+#include <asm/mman.h>
2213
2214 #include "setup.h"
2215
2216@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2218 #endif
2219
2220- current->mm->context.vdso_base = 0;
2221+ current->mm->context.vdso_base = ~0UL;
2222
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2224 * process
2225@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2229- 0, 0);
2230+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2232 rc = vdso_base;
2233 goto fail_mmapsem;
2234diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235--- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236+++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2237@@ -9,22 +9,6 @@
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2240
2241-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2242-{
2243- if (likely(access_ok(VERIFY_READ, from, n)))
2244- n = __copy_from_user(to, from, n);
2245- else
2246- memset(to, 0, n);
2247- return n;
2248-}
2249-
2250-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_WRITE, to, n)))
2253- n = __copy_to_user(to, from, n);
2254- return n;
2255-}
2256-
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2258 unsigned long n)
2259 {
2260@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2261 return n;
2262 }
2263
2264-EXPORT_SYMBOL(copy_from_user);
2265-EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2267
2268diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269--- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270+++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2271@@ -31,6 +31,10 @@
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275+#include <linux/slab.h>
2276+#include <linux/pagemap.h>
2277+#include <linux/compiler.h>
2278+#include <linux/unistd.h>
2279
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2282@@ -42,6 +46,7 @@
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286+#include <asm/ptrace.h>
2287
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290@@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2291 }
2292 #endif
2293
2294+#ifdef CONFIG_PAX_PAGEEXEC
2295+/*
2296+ * PaX: decide what to do with offenders (regs->nip = fault address)
2297+ *
2298+ * returns 1 when task should be killed
2299+ */
2300+static int pax_handle_fetch_fault(struct pt_regs *regs)
2301+{
2302+ return 1;
2303+}
2304+
2305+void pax_report_insns(void *pc, void *sp)
2306+{
2307+ unsigned long i;
2308+
2309+ printk(KERN_ERR "PAX: bytes at PC: ");
2310+ for (i = 0; i < 5; i++) {
2311+ unsigned int c;
2312+ if (get_user(c, (unsigned int __user *)pc+i))
2313+ printk(KERN_CONT "???????? ");
2314+ else
2315+ printk(KERN_CONT "%08x ", c);
2316+ }
2317+ printk("\n");
2318+}
2319+#endif
2320+
2321 /*
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324@@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2326 */
2327 if (trap == 0x400)
2328- error_code &= 0x48200000;
2329+ error_code &= 0x58200000;
2330 else
2331 is_write = error_code & DSISR_ISSTORE;
2332 #else
2333@@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2336 */
2337- if (error_code & 0x10000000)
2338+ if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2340 goto bad_area;
2341 #endif /* CONFIG_8xx */
2342@@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2344 * as embedded.
2345 */
2346- if (error_code & DSISR_PROTFAULT)
2347+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2348 goto bad_area;
2349 #endif /* CONFIG_PPC_STD_MMU */
2350
2351@@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2355+
2356+#ifdef CONFIG_PAX_PAGEEXEC
2357+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358+#ifdef CONFIG_PPC_STD_MMU
2359+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2360+#else
2361+ if (is_exec && regs->nip == address) {
2362+#endif
2363+ switch (pax_handle_fetch_fault(regs)) {
2364+ }
2365+
2366+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367+ do_group_exit(SIGKILL);
2368+ }
2369+ }
2370+#endif
2371+
2372 _exception(SIGSEGV, regs, code, address);
2373 return 0;
2374 }
2375diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376--- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377+++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2379 */
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2382+
2383+#ifdef CONFIG_PAX_RANDMMAP
2384+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2385+ mm->mmap_base += mm->delta_mmap;
2386+#endif
2387+
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2390 } else {
2391 mm->mmap_base = mmap_base();
2392+
2393+#ifdef CONFIG_PAX_RANDMMAP
2394+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2395+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2396+#endif
2397+
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2400 }
2401diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402--- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403+++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2406 return 0;
2407 vma = find_vma(mm, addr);
2408- return (!vma || (addr + len) <= vma->vm_start);
2409+ return check_heap_stack_gap(vma, addr, len);
2410 }
2411
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413@@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2415 continue;
2416 }
2417- if (!vma || addr + len <= vma->vm_start) {
2418+ if (check_heap_stack_gap(vma, addr, len)) {
2419 /*
2420 * Remember the place where we stopped the search:
2421 */
2422@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2423 }
2424 }
2425
2426- addr = mm->mmap_base;
2427- while (addr > len) {
2428+ if (mm->mmap_base < len)
2429+ addr = -ENOMEM;
2430+ else
2431+ addr = mm->mmap_base - len;
2432+
2433+ while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2437
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2442 */
2443 vma = find_vma(mm, addr);
2444- if (!vma || (addr + len) <= vma->vm_start) {
2445+ if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2447 if (use_cache)
2448 mm->free_area_cache = addr;
2449@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2451
2452 /* try just below the current vma->vm_start */
2453- addr = vma->vm_start;
2454+ addr = skip_heap_stack_gap(vma, len);
2455 }
2456
2457 /*
2458@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2460 return -EINVAL;
2461
2462+#ifdef CONFIG_PAX_RANDMMAP
2463+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2464+ addr = 0;
2465+#endif
2466+
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471--- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472+++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2476
2477-extern unsigned long randomize_et_dyn(unsigned long base);
2478-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2480+
2481+#ifdef CONFIG_PAX_ASLR
2482+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2483+
2484+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2486+#endif
2487
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490@@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2493
2494-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495-#define arch_randomize_brk arch_randomize_brk
2496-
2497 #endif
2498diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499--- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500+++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2504
2505-extern unsigned long arch_align_stack(unsigned long sp);
2506+#define arch_align_stack(x) ((x) & ~0xfUL)
2507
2508 static inline int tprot(unsigned long addr)
2509 {
2510diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511--- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512+++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513@@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2515 {
2516 might_fault();
2517+
2518+ if ((long)n < 0)
2519+ return n;
2520+
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2523 return n;
2524@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2527 {
2528+ if ((long)n < 0)
2529+ return n;
2530+
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2533 else
2534@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2536
2537 might_fault();
2538+
2539+ if ((long)n < 0)
2540+ return n;
2541+
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2544 return n;
2545diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546--- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547+++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548@@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2550 help
2551 This option allows to enable a buffer overflow protection for user
2552- space programs and it also selects the addressing mode option above.
2553- The kernel parameter noexec=on will enable this feature and also
2554- switch the addressing modes, default is disabled. Enabling this (via
2555- kernel parameter) on machines earlier than IBM System z9 this will
2556- reduce system performance.
2557+ space programs.
2558+ Enabling this (via kernel parameter) on machines earlier than IBM
2559+ System z9 this will reduce system performance.
2560
2561 comment "Code generation options"
2562
2563diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564--- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565+++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2567
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570- me->core_size = ALIGN(me->core_size, 4);
2571- me->arch.got_offset = me->core_size;
2572- me->core_size += me->arch.got_size;
2573- me->arch.plt_offset = me->core_size;
2574- me->core_size += me->arch.plt_size;
2575+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576+ me->arch.got_offset = me->core_size_rw;
2577+ me->core_size_rw += me->arch.got_size;
2578+ me->arch.plt_offset = me->core_size_rx;
2579+ me->core_size_rx += me->arch.plt_size;
2580 return 0;
2581 }
2582
2583@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2585 Elf_Addr *gotent;
2586
2587- gotent = me->module_core + me->arch.got_offset +
2588+ gotent = me->module_core_rw + me->arch.got_offset +
2589 info->got_offset;
2590 *gotent = val;
2591 info->got_initialized = 1;
2592@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596- (val + (Elf_Addr) me->module_core - loc) >> 1;
2597+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2604 unsigned int *ip;
2605- ip = me->module_core + me->arch.plt_offset +
2606+ ip = me->module_core_rx + me->arch.plt_offset +
2607 info->plt_offset;
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614- val = (Elf_Addr) me->module_core +
2615+ val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2617 info->plt_offset;
2618 val += rela->r_addend - loc;
2619@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623- ((Elf_Addr) me->module_core + me->arch.got_offset);
2624+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2629 break;
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638--- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639+++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640@@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2641 }
2642 return 0;
2643 }
2644-
2645-unsigned long arch_align_stack(unsigned long sp)
2646-{
2647- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648- sp -= get_random_int() & ~PAGE_MASK;
2649- return sp & ~0xf;
2650-}
2651-
2652-static inline unsigned long brk_rnd(void)
2653-{
2654- /* 8MB for 32bit, 1GB for 64bit */
2655- if (is_32bit_task())
2656- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2657- else
2658- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2659-}
2660-
2661-unsigned long arch_randomize_brk(struct mm_struct *mm)
2662-{
2663- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2664-
2665- if (ret < mm->brk)
2666- return mm->brk;
2667- return ret;
2668-}
2669-
2670-unsigned long randomize_et_dyn(unsigned long base)
2671-{
2672- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2673-
2674- if (!(current->flags & PF_RANDOMIZE))
2675- return base;
2676- if (ret < base)
2677- return base;
2678- return ret;
2679-}
2680diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681--- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682+++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2684 }
2685 early_param("mem", early_parse_mem);
2686
2687-unsigned int user_mode = HOME_SPACE_MODE;
2688+unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2690
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692@@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2693 }
2694 }
2695
2696-/*
2697- * Switch kernel/user addressing modes?
2698- */
2699-static int __init early_parse_switch_amode(char *p)
2700-{
2701- if (user_mode != SECONDARY_SPACE_MODE)
2702- user_mode = PRIMARY_SPACE_MODE;
2703- return 0;
2704-}
2705-early_param("switch_amode", early_parse_switch_amode);
2706-
2707 static int __init early_parse_user_mode(char *p)
2708 {
2709 if (p && strcmp(p, "primary") == 0)
2710@@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2711 }
2712 early_param("user_mode", early_parse_user_mode);
2713
2714-#ifdef CONFIG_S390_EXEC_PROTECT
2715-/*
2716- * Enable execute protection?
2717- */
2718-static int __init early_parse_noexec(char *p)
2719-{
2720- if (!strncmp(p, "off", 3))
2721- return 0;
2722- user_mode = SECONDARY_SPACE_MODE;
2723- return 0;
2724-}
2725-early_param("noexec", early_parse_noexec);
2726-#endif /* CONFIG_S390_EXEC_PROTECT */
2727-
2728 static void setup_addressing_mode(void)
2729 {
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732--- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733+++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2735 */
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2738+
2739+#ifdef CONFIG_PAX_RANDMMAP
2740+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2741+ mm->mmap_base += mm->delta_mmap;
2742+#endif
2743+
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2746 } else {
2747 mm->mmap_base = mmap_base();
2748+
2749+#ifdef CONFIG_PAX_RANDMMAP
2750+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2751+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2752+#endif
2753+
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2756 }
2757@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2758 */
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2761+
2762+#ifdef CONFIG_PAX_RANDMMAP
2763+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2764+ mm->mmap_base += mm->delta_mmap;
2765+#endif
2766+
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2769 } else {
2770 mm->mmap_base = mmap_base();
2771+
2772+#ifdef CONFIG_PAX_RANDMMAP
2773+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2774+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2775+#endif
2776+
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2779 }
2780diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781--- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782+++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783@@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2785
2786 typedef void (*vi_handler_t)(void);
2787-extern unsigned long arch_align_stack(unsigned long sp);
2788+#define arch_align_stack(x) (x)
2789
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793--- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794+++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2796
2797 return task_pt_regs(task)->cp0_epc;
2798 }
2799-
2800-unsigned long arch_align_stack(unsigned long sp)
2801-{
2802- return sp;
2803-}
2804diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805--- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806+++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811- if (TASK_SIZE - len >= addr &&
2812- (!vma || addr + len <= vma->vm_start))
2813+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2814 return addr;
2815 }
2816
2817@@ -106,7 +105,7 @@ full_search:
2818 }
2819 return -ENOMEM;
2820 }
2821- if (likely(!vma || addr + len <= vma->vm_start)) {
2822+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2823 /*
2824 * Remember the place where we stopped the search:
2825 */
2826@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2828
2829 vma = find_vma(mm, addr);
2830- if (TASK_SIZE - len >= addr &&
2831- (!vma || addr + len <= vma->vm_start))
2832+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2833 return addr;
2834 }
2835
2836@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840- if (!vma || addr <= vma->vm_start) {
2841+ if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2844 }
2845@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2847 goto bottomup;
2848
2849- addr = mm->mmap_base-len;
2850- if (do_colour_align)
2851- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852+ addr = mm->mmap_base - len;
2853
2854 do {
2855+ if (do_colour_align)
2856+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2857 /*
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2861 */
2862 vma = find_vma(mm, addr);
2863- if (likely(!vma || addr+len <= vma->vm_start)) {
2864+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2867 }
2868@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2870
2871 /* try just below the current vma->vm_start */
2872- addr = vma->vm_start-len;
2873- if (do_colour_align)
2874- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875- } while (likely(len < vma->vm_start));
2876+ addr = skip_heap_stack_gap(vma, len);
2877+ } while (!IS_ERR_VALUE(addr));
2878
2879 bottomup:
2880 /*
2881diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882--- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883+++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-05 20:34:06.000000000 -0400
2884@@ -14,18 +14,40 @@
2885 #define ATOMIC64_INIT(i) { (i) }
2886
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2889+{
2890+ return v->counter;
2891+}
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2894+{
2895+ return v->counter;
2896+}
2897
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2900+{
2901+ v->counter = i;
2902+}
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2905+{
2906+ v->counter = i;
2907+}
2908
2909 extern void atomic_add(int, atomic_t *);
2910+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2917
2918 extern int atomic_add_ret(int, atomic_t *);
2919+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2924
2925@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2927
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2930+{
2931+ return atomic_add_ret_unchecked(1, v);
2932+}
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2935+{
2936+ return atomic64_add_ret_unchecked(1, v);
2937+}
2938
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2941
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2944+{
2945+ return atomic_add_ret_unchecked(i, v);
2946+}
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2949+{
2950+ return atomic64_add_ret_unchecked(i, v);
2951+}
2952
2953 /*
2954 * atomic_inc_and_test - increment and test
2955@@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
2956 * other cases.
2957 */
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
2960 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2961
2962 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2963@@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
2964 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2965
2966 #define atomic_inc(v) atomic_add(1, v)
2967+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2968+{
2969+ atomic_add_unchecked(1, v);
2970+}
2971 #define atomic64_inc(v) atomic64_add(1, v)
2972+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2973+{
2974+ atomic64_add_unchecked(1, v);
2975+}
2976
2977 #define atomic_dec(v) atomic_sub(1, v)
2978+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2979+{
2980+ atomic_sub_unchecked(1, v);
2981+}
2982 #define atomic64_dec(v) atomic64_sub(1, v)
2983+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2984+{
2985+ atomic64_sub_unchecked(1, v);
2986+}
2987
2988 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2989 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2990
2991 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2992+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2993 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2994+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
2995
2996 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2997 {
2998- int c, old;
2999+ int c, old, new;
3000 c = atomic_read(v);
3001 for (;;) {
3002- if (unlikely(c == (u)))
3003+ if (unlikely(c == u))
3004 break;
3005- old = atomic_cmpxchg((v), c, c + (a));
3006+
3007+ asm volatile("addcc %2, %0, %0\n"
3008+
3009+#ifdef CONFIG_PAX_REFCOUNT
3010+ "tvs %%icc, 6\n"
3011+#endif
3012+
3013+ : "=r" (new)
3014+ : "0" (c), "ir" (a)
3015+ : "cc");
3016+
3017+ old = atomic_cmpxchg(v, c, new);
3018 if (likely(old == c))
3019 break;
3020 c = old;
3021 }
3022- return c != (u);
3023+ return c != u;
3024 }
3025
3026 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3027@@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3028
3029 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3030 {
3031- long c, old;
3032+ long c, old, new;
3033 c = atomic64_read(v);
3034 for (;;) {
3035- if (unlikely(c == (u)))
3036+ if (unlikely(c == u))
3037 break;
3038- old = atomic64_cmpxchg((v), c, c + (a));
3039+
3040+ asm volatile("addcc %2, %0, %0\n"
3041+
3042+#ifdef CONFIG_PAX_REFCOUNT
3043+ "tvs %%xcc, 6\n"
3044+#endif
3045+
3046+ : "=r" (new)
3047+ : "0" (c), "ir" (a)
3048+ : "cc");
3049+
3050+ old = atomic64_cmpxchg(v, c, new);
3051 if (likely(old == c))
3052 break;
3053 c = old;
3054 }
3055- return c != (u);
3056+ return c != u;
3057 }
3058
3059 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3060diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3061--- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3062+++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3063@@ -10,7 +10,7 @@
3064 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3065
3066 #define L1_CACHE_SHIFT 5
3067-#define L1_CACHE_BYTES 32
3068+#define L1_CACHE_BYTES 32UL
3069
3070 #ifdef CONFIG_SPARC32
3071 #define SMP_CACHE_BYTES_SHIFT 5
3072diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3073--- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3074+++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3075@@ -114,6 +114,13 @@ typedef struct {
3076
3077 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3078
3079+#ifdef CONFIG_PAX_ASLR
3080+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3081+
3082+#define PAX_DELTA_MMAP_LEN 16
3083+#define PAX_DELTA_STACK_LEN 16
3084+#endif
3085+
3086 /* This yields a mask that user programs can use to figure out what
3087 instruction set this cpu supports. This can NOT be done in userspace
3088 on Sparc. */
3089diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3090--- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3091+++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3092@@ -162,6 +162,12 @@ typedef struct {
3093 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3094 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3095
3096+#ifdef CONFIG_PAX_ASLR
3097+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3098+
3099+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3100+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3101+#endif
3102
3103 /* This yields a mask that user programs can use to figure out what
3104 instruction set this cpu supports. */
3105diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3106--- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3107+++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3108@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3109 BTFIXUPDEF_INT(page_none)
3110 BTFIXUPDEF_INT(page_copy)
3111 BTFIXUPDEF_INT(page_readonly)
3112+
3113+#ifdef CONFIG_PAX_PAGEEXEC
3114+BTFIXUPDEF_INT(page_shared_noexec)
3115+BTFIXUPDEF_INT(page_copy_noexec)
3116+BTFIXUPDEF_INT(page_readonly_noexec)
3117+#endif
3118+
3119 BTFIXUPDEF_INT(page_kernel)
3120
3121 #define PMD_SHIFT SUN4C_PMD_SHIFT
3122@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3123 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3124 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3125
3126+#ifdef CONFIG_PAX_PAGEEXEC
3127+extern pgprot_t PAGE_SHARED_NOEXEC;
3128+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3129+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3130+#else
3131+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3132+# define PAGE_COPY_NOEXEC PAGE_COPY
3133+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3134+#endif
3135+
3136 extern unsigned long page_kernel;
3137
3138 #ifdef MODULE
3139diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3140--- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3141+++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3142@@ -115,6 +115,13 @@
3143 SRMMU_EXEC | SRMMU_REF)
3144 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3145 SRMMU_EXEC | SRMMU_REF)
3146+
3147+#ifdef CONFIG_PAX_PAGEEXEC
3148+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3149+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3150+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3151+#endif
3152+
3153 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3154 SRMMU_DIRTY | SRMMU_REF)
3155
3156diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3157--- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3158+++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-05 19:44:33.000000000 -0400
3159@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3160
3161 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3162
3163-static void inline arch_read_lock(arch_rwlock_t *lock)
3164+static inline void arch_read_lock(arch_rwlock_t *lock)
3165 {
3166 unsigned long tmp1, tmp2;
3167
3168 __asm__ __volatile__ (
3169 "1: ldsw [%2], %0\n"
3170 " brlz,pn %0, 2f\n"
3171-"4: add %0, 1, %1\n"
3172+"4: addcc %0, 1, %1\n"
3173+
3174+#ifdef CONFIG_PAX_REFCOUNT
3175+" tvs %%icc, 6\n"
3176+#endif
3177+
3178 " cas [%2], %0, %1\n"
3179 " cmp %0, %1\n"
3180 " bne,pn %%icc, 1b\n"
3181@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3182 " .previous"
3183 : "=&r" (tmp1), "=&r" (tmp2)
3184 : "r" (lock)
3185- : "memory");
3186+ : "memory", "cc");
3187 }
3188
3189-static int inline arch_read_trylock(arch_rwlock_t *lock)
3190+static inline int arch_read_trylock(arch_rwlock_t *lock)
3191 {
3192 int tmp1, tmp2;
3193
3194@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3195 "1: ldsw [%2], %0\n"
3196 " brlz,a,pn %0, 2f\n"
3197 " mov 0, %0\n"
3198-" add %0, 1, %1\n"
3199+" addcc %0, 1, %1\n"
3200+
3201+#ifdef CONFIG_PAX_REFCOUNT
3202+" tvs %%icc, 6\n"
3203+#endif
3204+
3205 " cas [%2], %0, %1\n"
3206 " cmp %0, %1\n"
3207 " bne,pn %%icc, 1b\n"
3208@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3209 return tmp1;
3210 }
3211
3212-static void inline arch_read_unlock(arch_rwlock_t *lock)
3213+static inline void arch_read_unlock(arch_rwlock_t *lock)
3214 {
3215 unsigned long tmp1, tmp2;
3216
3217 __asm__ __volatile__(
3218 "1: lduw [%2], %0\n"
3219-" sub %0, 1, %1\n"
3220+" subcc %0, 1, %1\n"
3221+
3222+#ifdef CONFIG_PAX_REFCOUNT
3223+" tvs %%icc, 6\n"
3224+#endif
3225+
3226 " cas [%2], %0, %1\n"
3227 " cmp %0, %1\n"
3228 " bne,pn %%xcc, 1b\n"
3229@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3230 : "memory");
3231 }
3232
3233-static void inline arch_write_lock(arch_rwlock_t *lock)
3234+static inline void arch_write_lock(arch_rwlock_t *lock)
3235 {
3236 unsigned long mask, tmp1, tmp2;
3237
3238@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3239 : "memory");
3240 }
3241
3242-static void inline arch_write_unlock(arch_rwlock_t *lock)
3243+static inline void arch_write_unlock(arch_rwlock_t *lock)
3244 {
3245 __asm__ __volatile__(
3246 " stw %%g0, [%0]"
3247@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3248 : "memory");
3249 }
3250
3251-static int inline arch_write_trylock(arch_rwlock_t *lock)
3252+static inline int arch_write_trylock(arch_rwlock_t *lock)
3253 {
3254 unsigned long mask, tmp1, tmp2, result;
3255
3256diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3257--- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3258+++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3259@@ -50,6 +50,8 @@ struct thread_info {
3260 unsigned long w_saved;
3261
3262 struct restart_block restart_block;
3263+
3264+ unsigned long lowest_stack;
3265 };
3266
3267 /*
3268diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3269--- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3270+++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3271@@ -63,6 +63,8 @@ struct thread_info {
3272 struct pt_regs *kern_una_regs;
3273 unsigned int kern_una_insn;
3274
3275+ unsigned long lowest_stack;
3276+
3277 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3278 };
3279
3280diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3281--- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3282+++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3283@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3284
3285 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3286 {
3287- if (n && __access_ok((unsigned long) to, n))
3288+ if ((long)n < 0)
3289+ return n;
3290+
3291+ if (n && __access_ok((unsigned long) to, n)) {
3292+ if (!__builtin_constant_p(n))
3293+ check_object_size(from, n, true);
3294 return __copy_user(to, (__force void __user *) from, n);
3295- else
3296+ } else
3297 return n;
3298 }
3299
3300 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3301 {
3302+ if ((long)n < 0)
3303+ return n;
3304+
3305+ if (!__builtin_constant_p(n))
3306+ check_object_size(from, n, true);
3307+
3308 return __copy_user(to, (__force void __user *) from, n);
3309 }
3310
3311 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3312 {
3313- if (n && __access_ok((unsigned long) from, n))
3314+ if ((long)n < 0)
3315+ return n;
3316+
3317+ if (n && __access_ok((unsigned long) from, n)) {
3318+ if (!__builtin_constant_p(n))
3319+ check_object_size(to, n, false);
3320 return __copy_user((__force void __user *) to, from, n);
3321- else
3322+ } else
3323 return n;
3324 }
3325
3326 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3327 {
3328+ if ((long)n < 0)
3329+ return n;
3330+
3331 return __copy_user((__force void __user *) to, from, n);
3332 }
3333
3334diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3335--- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3336+++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3337@@ -10,6 +10,7 @@
3338 #include <linux/compiler.h>
3339 #include <linux/string.h>
3340 #include <linux/thread_info.h>
3341+#include <linux/kernel.h>
3342 #include <asm/asi.h>
3343 #include <asm/system.h>
3344 #include <asm/spitfire.h>
3345@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3346 static inline unsigned long __must_check
3347 copy_from_user(void *to, const void __user *from, unsigned long size)
3348 {
3349- unsigned long ret = ___copy_from_user(to, from, size);
3350+ unsigned long ret;
3351
3352+ if ((long)size < 0 || size > INT_MAX)
3353+ return size;
3354+
3355+ if (!__builtin_constant_p(size))
3356+ check_object_size(to, size, false);
3357+
3358+ ret = ___copy_from_user(to, from, size);
3359 if (unlikely(ret))
3360 ret = copy_from_user_fixup(to, from, size);
3361
3362@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3363 static inline unsigned long __must_check
3364 copy_to_user(void __user *to, const void *from, unsigned long size)
3365 {
3366- unsigned long ret = ___copy_to_user(to, from, size);
3367+ unsigned long ret;
3368+
3369+ if ((long)size < 0 || size > INT_MAX)
3370+ return size;
3371+
3372+ if (!__builtin_constant_p(size))
3373+ check_object_size(from, size, true);
3374
3375+ ret = ___copy_to_user(to, from, size);
3376 if (unlikely(ret))
3377 ret = copy_to_user_fixup(to, from, size);
3378 return ret;
3379diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3380--- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3381+++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3382@@ -1,5 +1,13 @@
3383 #ifndef ___ASM_SPARC_UACCESS_H
3384 #define ___ASM_SPARC_UACCESS_H
3385+
3386+#ifdef __KERNEL__
3387+#ifndef __ASSEMBLY__
3388+#include <linux/types.h>
3389+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3390+#endif
3391+#endif
3392+
3393 #if defined(__sparc__) && defined(__arch64__)
3394 #include <asm/uaccess_64.h>
3395 #else
3396diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3397--- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3398+++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3399@@ -3,7 +3,7 @@
3400 #
3401
3402 asflags-y := -ansi
3403-ccflags-y := -Werror
3404+#ccflags-y := -Werror
3405
3406 extra-y := head_$(BITS).o
3407 extra-y += init_task.o
3408diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3409--- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3410+++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3411@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3412 rw->ins[4], rw->ins[5],
3413 rw->ins[6],
3414 rw->ins[7]);
3415- printk("%pS\n", (void *) rw->ins[7]);
3416+ printk("%pA\n", (void *) rw->ins[7]);
3417 rw = (struct reg_window32 *) rw->ins[6];
3418 }
3419 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3420@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3421
3422 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3423 r->psr, r->pc, r->npc, r->y, print_tainted());
3424- printk("PC: <%pS>\n", (void *) r->pc);
3425+ printk("PC: <%pA>\n", (void *) r->pc);
3426 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3427 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3428 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3429 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3430 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3431 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3432- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3433+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3434
3435 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3436 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3437@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3438 rw = (struct reg_window32 *) fp;
3439 pc = rw->ins[7];
3440 printk("[%08lx : ", pc);
3441- printk("%pS ] ", (void *) pc);
3442+ printk("%pA ] ", (void *) pc);
3443 fp = rw->ins[6];
3444 } while (++count < 16);
3445 printk("\n");
3446diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3447--- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3448+++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3449@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3450 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3451 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3452 if (regs->tstate & TSTATE_PRIV)
3453- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3454+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3455 }
3456
3457 void show_regs(struct pt_regs *regs)
3458 {
3459 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3460 regs->tpc, regs->tnpc, regs->y, print_tainted());
3461- printk("TPC: <%pS>\n", (void *) regs->tpc);
3462+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3463 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3464 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3465 regs->u_regs[3]);
3466@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3467 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3468 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3469 regs->u_regs[15]);
3470- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3471+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3472 show_regwindow(regs);
3473 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3474 }
3475@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3476 ((tp && tp->task) ? tp->task->pid : -1));
3477
3478 if (gp->tstate & TSTATE_PRIV) {
3479- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3480+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3481 (void *) gp->tpc,
3482 (void *) gp->o7,
3483 (void *) gp->i7,
3484diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3485--- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3486+++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3487@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3488 if (ARCH_SUN4C && len > 0x20000000)
3489 return -ENOMEM;
3490 if (!addr)
3491- addr = TASK_UNMAPPED_BASE;
3492+ addr = current->mm->mmap_base;
3493
3494 if (flags & MAP_SHARED)
3495 addr = COLOUR_ALIGN(addr);
3496@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3497 }
3498 if (TASK_SIZE - PAGE_SIZE - len < addr)
3499 return -ENOMEM;
3500- if (!vmm || addr + len <= vmm->vm_start)
3501+ if (check_heap_stack_gap(vmm, addr, len))
3502 return addr;
3503 addr = vmm->vm_end;
3504 if (flags & MAP_SHARED)
3505diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3506--- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3507+++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3508@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3509 /* We do not accept a shared mapping if it would violate
3510 * cache aliasing constraints.
3511 */
3512- if ((flags & MAP_SHARED) &&
3513+ if ((filp || (flags & MAP_SHARED)) &&
3514 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3515 return -EINVAL;
3516 return addr;
3517@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3518 if (filp || (flags & MAP_SHARED))
3519 do_color_align = 1;
3520
3521+#ifdef CONFIG_PAX_RANDMMAP
3522+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3523+#endif
3524+
3525 if (addr) {
3526 if (do_color_align)
3527 addr = COLOUR_ALIGN(addr, pgoff);
3528@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3529 addr = PAGE_ALIGN(addr);
3530
3531 vma = find_vma(mm, addr);
3532- if (task_size - len >= addr &&
3533- (!vma || addr + len <= vma->vm_start))
3534+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3535 return addr;
3536 }
3537
3538 if (len > mm->cached_hole_size) {
3539- start_addr = addr = mm->free_area_cache;
3540+ start_addr = addr = mm->free_area_cache;
3541 } else {
3542- start_addr = addr = TASK_UNMAPPED_BASE;
3543+ start_addr = addr = mm->mmap_base;
3544 mm->cached_hole_size = 0;
3545 }
3546
3547@@ -174,14 +177,14 @@ full_search:
3548 vma = find_vma(mm, VA_EXCLUDE_END);
3549 }
3550 if (unlikely(task_size < addr)) {
3551- if (start_addr != TASK_UNMAPPED_BASE) {
3552- start_addr = addr = TASK_UNMAPPED_BASE;
3553+ if (start_addr != mm->mmap_base) {
3554+ start_addr = addr = mm->mmap_base;
3555 mm->cached_hole_size = 0;
3556 goto full_search;
3557 }
3558 return -ENOMEM;
3559 }
3560- if (likely(!vma || addr + len <= vma->vm_start)) {
3561+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3562 /*
3563 * Remember the place where we stopped the search:
3564 */
3565@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3566 /* We do not accept a shared mapping if it would violate
3567 * cache aliasing constraints.
3568 */
3569- if ((flags & MAP_SHARED) &&
3570+ if ((filp || (flags & MAP_SHARED)) &&
3571 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3572 return -EINVAL;
3573 return addr;
3574@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3575 addr = PAGE_ALIGN(addr);
3576
3577 vma = find_vma(mm, addr);
3578- if (task_size - len >= addr &&
3579- (!vma || addr + len <= vma->vm_start))
3580+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3581 return addr;
3582 }
3583
3584@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 /* make sure it can fit in the remaining address space */
3586 if (likely(addr > len)) {
3587 vma = find_vma(mm, addr-len);
3588- if (!vma || addr <= vma->vm_start) {
3589+ if (check_heap_stack_gap(vma, addr - len, len)) {
3590 /* remember the address as a hint for next time */
3591 return (mm->free_area_cache = addr-len);
3592 }
3593@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3594 if (unlikely(mm->mmap_base < len))
3595 goto bottomup;
3596
3597- addr = mm->mmap_base-len;
3598- if (do_color_align)
3599- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3600+ addr = mm->mmap_base - len;
3601
3602 do {
3603+ if (do_color_align)
3604+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3605 /*
3606 * Lookup failure means no vma is above this address,
3607 * else if new region fits below vma->vm_start,
3608 * return with success:
3609 */
3610 vma = find_vma(mm, addr);
3611- if (likely(!vma || addr+len <= vma->vm_start)) {
3612+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3613 /* remember the address as a hint for next time */
3614 return (mm->free_area_cache = addr);
3615 }
3616@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3617 mm->cached_hole_size = vma->vm_start - addr;
3618
3619 /* try just below the current vma->vm_start */
3620- addr = vma->vm_start-len;
3621- if (do_color_align)
3622- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3623- } while (likely(len < vma->vm_start));
3624+ addr = skip_heap_stack_gap(vma, len);
3625+ } while (!IS_ERR_VALUE(addr));
3626
3627 bottomup:
3628 /*
3629@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3630 gap == RLIM_INFINITY ||
3631 sysctl_legacy_va_layout) {
3632 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3633+
3634+#ifdef CONFIG_PAX_RANDMMAP
3635+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3636+ mm->mmap_base += mm->delta_mmap;
3637+#endif
3638+
3639 mm->get_unmapped_area = arch_get_unmapped_area;
3640 mm->unmap_area = arch_unmap_area;
3641 } else {
3642@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3643 gap = (task_size / 6 * 5);
3644
3645 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3646+
3647+#ifdef CONFIG_PAX_RANDMMAP
3648+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3649+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3650+#endif
3651+
3652 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3653 mm->unmap_area = arch_unmap_area_topdown;
3654 }
3655diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3656--- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3657+++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3658@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3659 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3660 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3661
3662+extern void gr_handle_kernel_exploit(void);
3663+
3664 void die_if_kernel(char *str, struct pt_regs *regs)
3665 {
3666 static int die_counter;
3667@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3668 count++ < 30 &&
3669 (((unsigned long) rw) >= PAGE_OFFSET) &&
3670 !(((unsigned long) rw) & 0x7)) {
3671- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3672+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3673 (void *) rw->ins[7]);
3674 rw = (struct reg_window32 *)rw->ins[6];
3675 }
3676 }
3677 printk("Instruction DUMP:");
3678 instruction_dump ((unsigned long *) regs->pc);
3679- if(regs->psr & PSR_PS)
3680+ if(regs->psr & PSR_PS) {
3681+ gr_handle_kernel_exploit();
3682 do_exit(SIGKILL);
3683+ }
3684 do_exit(SIGSEGV);
3685 }
3686
3687diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3688--- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3689+++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3690@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3691 i + 1,
3692 p->trapstack[i].tstate, p->trapstack[i].tpc,
3693 p->trapstack[i].tnpc, p->trapstack[i].tt);
3694- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3695+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3696 }
3697 }
3698
3699@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3700
3701 lvl -= 0x100;
3702 if (regs->tstate & TSTATE_PRIV) {
3703+
3704+#ifdef CONFIG_PAX_REFCOUNT
3705+ if (lvl == 6)
3706+ pax_report_refcount_overflow(regs);
3707+#endif
3708+
3709 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3710 die_if_kernel(buffer, regs);
3711 }
3712@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3713 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3714 {
3715 char buffer[32];
3716-
3717+
3718 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3719 0, lvl, SIGTRAP) == NOTIFY_STOP)
3720 return;
3721
3722+#ifdef CONFIG_PAX_REFCOUNT
3723+ if (lvl == 6)
3724+ pax_report_refcount_overflow(regs);
3725+#endif
3726+
3727 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3728
3729 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3730@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3731 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3732 printk("%s" "ERROR(%d): ",
3733 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3734- printk("TPC<%pS>\n", (void *) regs->tpc);
3735+ printk("TPC<%pA>\n", (void *) regs->tpc);
3736 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3737 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3738 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3739@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3740 smp_processor_id(),
3741 (type & 0x1) ? 'I' : 'D',
3742 regs->tpc);
3743- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3744+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3745 panic("Irrecoverable Cheetah+ parity error.");
3746 }
3747
3748@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3749 smp_processor_id(),
3750 (type & 0x1) ? 'I' : 'D',
3751 regs->tpc);
3752- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3753+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3754 }
3755
3756 struct sun4v_error_entry {
3757@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3758
3759 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3760 regs->tpc, tl);
3761- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3762+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3763 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3764- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3765+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3766 (void *) regs->u_regs[UREG_I7]);
3767 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3768 "pte[%lx] error[%lx]\n",
3769@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3770
3771 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3772 regs->tpc, tl);
3773- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3774+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3775 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3776- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3777+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3778 (void *) regs->u_regs[UREG_I7]);
3779 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3780 "pte[%lx] error[%lx]\n",
3781@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3782 fp = (unsigned long)sf->fp + STACK_BIAS;
3783 }
3784
3785- printk(" [%016lx] %pS\n", pc, (void *) pc);
3786+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3787 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3788 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3789 int index = tsk->curr_ret_stack;
3790 if (tsk->ret_stack && index >= graph) {
3791 pc = tsk->ret_stack[index - graph].ret;
3792- printk(" [%016lx] %pS\n", pc, (void *) pc);
3793+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3794 graph++;
3795 }
3796 }
3797@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3798 return (struct reg_window *) (fp + STACK_BIAS);
3799 }
3800
3801+extern void gr_handle_kernel_exploit(void);
3802+
3803 void die_if_kernel(char *str, struct pt_regs *regs)
3804 {
3805 static int die_counter;
3806@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3807 while (rw &&
3808 count++ < 30 &&
3809 kstack_valid(tp, (unsigned long) rw)) {
3810- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3811+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3812 (void *) rw->ins[7]);
3813
3814 rw = kernel_stack_up(rw);
3815@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3816 }
3817 user_instruction_dump ((unsigned int __user *) regs->tpc);
3818 }
3819- if (regs->tstate & TSTATE_PRIV)
3820+ if (regs->tstate & TSTATE_PRIV) {
3821+ gr_handle_kernel_exploit();
3822 do_exit(SIGKILL);
3823+ }
3824 do_exit(SIGSEGV);
3825 }
3826 EXPORT_SYMBOL(die_if_kernel);
3827diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3828--- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3829+++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3830@@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3831 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3832
3833 if (__ratelimit(&ratelimit)) {
3834- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3835+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3836 regs->tpc, (void *) regs->tpc);
3837 }
3838 }
3839diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3840--- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3841+++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3842@@ -18,7 +18,12 @@
3843 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3844 BACKOFF_SETUP(%o2)
3845 1: lduw [%o1], %g1
3846- add %g1, %o0, %g7
3847+ addcc %g1, %o0, %g7
3848+
3849+#ifdef CONFIG_PAX_REFCOUNT
3850+ tvs %icc, 6
3851+#endif
3852+
3853 cas [%o1], %g1, %g7
3854 cmp %g1, %g7
3855 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3856@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3857 2: BACKOFF_SPIN(%o2, %o3, 1b)
3858 .size atomic_add, .-atomic_add
3859
3860+ .globl atomic_add_unchecked
3861+ .type atomic_add_unchecked,#function
3862+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3863+ BACKOFF_SETUP(%o2)
3864+1: lduw [%o1], %g1
3865+ add %g1, %o0, %g7
3866+ cas [%o1], %g1, %g7
3867+ cmp %g1, %g7
3868+ bne,pn %icc, 2f
3869+ nop
3870+ retl
3871+ nop
3872+2: BACKOFF_SPIN(%o2, %o3, 1b)
3873+ .size atomic_add_unchecked, .-atomic_add_unchecked
3874+
3875 .globl atomic_sub
3876 .type atomic_sub,#function
3877 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3878 BACKOFF_SETUP(%o2)
3879 1: lduw [%o1], %g1
3880- sub %g1, %o0, %g7
3881+ subcc %g1, %o0, %g7
3882+
3883+#ifdef CONFIG_PAX_REFCOUNT
3884+ tvs %icc, 6
3885+#endif
3886+
3887 cas [%o1], %g1, %g7
3888 cmp %g1, %g7
3889 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3890@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3891 2: BACKOFF_SPIN(%o2, %o3, 1b)
3892 .size atomic_sub, .-atomic_sub
3893
3894+ .globl atomic_sub_unchecked
3895+ .type atomic_sub_unchecked,#function
3896+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3897+ BACKOFF_SETUP(%o2)
3898+1: lduw [%o1], %g1
3899+ sub %g1, %o0, %g7
3900+ cas [%o1], %g1, %g7
3901+ cmp %g1, %g7
3902+ bne,pn %icc, 2f
3903+ nop
3904+ retl
3905+ nop
3906+2: BACKOFF_SPIN(%o2, %o3, 1b)
3907+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3908+
3909 .globl atomic_add_ret
3910 .type atomic_add_ret,#function
3911 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3912 BACKOFF_SETUP(%o2)
3913 1: lduw [%o1], %g1
3914- add %g1, %o0, %g7
3915+ addcc %g1, %o0, %g7
3916+
3917+#ifdef CONFIG_PAX_REFCOUNT
3918+ tvs %icc, 6
3919+#endif
3920+
3921 cas [%o1], %g1, %g7
3922 cmp %g1, %g7
3923 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3924@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3925 2: BACKOFF_SPIN(%o2, %o3, 1b)
3926 .size atomic_add_ret, .-atomic_add_ret
3927
3928+ .globl atomic_add_ret_unchecked
3929+ .type atomic_add_ret_unchecked,#function
3930+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3931+ BACKOFF_SETUP(%o2)
3932+1: lduw [%o1], %g1
3933+ addcc %g1, %o0, %g7
3934+ cas [%o1], %g1, %g7
3935+ cmp %g1, %g7
3936+ bne,pn %icc, 2f
3937+ add %g7, %o0, %g7
3938+ sra %g7, 0, %o0
3939+ retl
3940+ nop
3941+2: BACKOFF_SPIN(%o2, %o3, 1b)
3942+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3943+
3944 .globl atomic_sub_ret
3945 .type atomic_sub_ret,#function
3946 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3947 BACKOFF_SETUP(%o2)
3948 1: lduw [%o1], %g1
3949- sub %g1, %o0, %g7
3950+ subcc %g1, %o0, %g7
3951+
3952+#ifdef CONFIG_PAX_REFCOUNT
3953+ tvs %icc, 6
3954+#endif
3955+
3956 cas [%o1], %g1, %g7
3957 cmp %g1, %g7
3958 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3959@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3960 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3961 BACKOFF_SETUP(%o2)
3962 1: ldx [%o1], %g1
3963- add %g1, %o0, %g7
3964+ addcc %g1, %o0, %g7
3965+
3966+#ifdef CONFIG_PAX_REFCOUNT
3967+ tvs %xcc, 6
3968+#endif
3969+
3970 casx [%o1], %g1, %g7
3971 cmp %g1, %g7
3972 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3973@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3974 2: BACKOFF_SPIN(%o2, %o3, 1b)
3975 .size atomic64_add, .-atomic64_add
3976
3977+ .globl atomic64_add_unchecked
3978+ .type atomic64_add_unchecked,#function
3979+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3980+ BACKOFF_SETUP(%o2)
3981+1: ldx [%o1], %g1
3982+ addcc %g1, %o0, %g7
3983+ casx [%o1], %g1, %g7
3984+ cmp %g1, %g7
3985+ bne,pn %xcc, 2f
3986+ nop
3987+ retl
3988+ nop
3989+2: BACKOFF_SPIN(%o2, %o3, 1b)
3990+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3991+
3992 .globl atomic64_sub
3993 .type atomic64_sub,#function
3994 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3995 BACKOFF_SETUP(%o2)
3996 1: ldx [%o1], %g1
3997- sub %g1, %o0, %g7
3998+ subcc %g1, %o0, %g7
3999+
4000+#ifdef CONFIG_PAX_REFCOUNT
4001+ tvs %xcc, 6
4002+#endif
4003+
4004 casx [%o1], %g1, %g7
4005 cmp %g1, %g7
4006 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4007@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4008 2: BACKOFF_SPIN(%o2, %o3, 1b)
4009 .size atomic64_sub, .-atomic64_sub
4010
4011+ .globl atomic64_sub_unchecked
4012+ .type atomic64_sub_unchecked,#function
4013+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4014+ BACKOFF_SETUP(%o2)
4015+1: ldx [%o1], %g1
4016+ subcc %g1, %o0, %g7
4017+ casx [%o1], %g1, %g7
4018+ cmp %g1, %g7
4019+ bne,pn %xcc, 2f
4020+ nop
4021+ retl
4022+ nop
4023+2: BACKOFF_SPIN(%o2, %o3, 1b)
4024+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4025+
4026 .globl atomic64_add_ret
4027 .type atomic64_add_ret,#function
4028 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4029 BACKOFF_SETUP(%o2)
4030 1: ldx [%o1], %g1
4031- add %g1, %o0, %g7
4032+ addcc %g1, %o0, %g7
4033+
4034+#ifdef CONFIG_PAX_REFCOUNT
4035+ tvs %xcc, 6
4036+#endif
4037+
4038 casx [%o1], %g1, %g7
4039 cmp %g1, %g7
4040 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4041@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4042 2: BACKOFF_SPIN(%o2, %o3, 1b)
4043 .size atomic64_add_ret, .-atomic64_add_ret
4044
4045+ .globl atomic64_add_ret_unchecked
4046+ .type atomic64_add_ret_unchecked,#function
4047+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4048+ BACKOFF_SETUP(%o2)
4049+1: ldx [%o1], %g1
4050+ addcc %g1, %o0, %g7
4051+ casx [%o1], %g1, %g7
4052+ cmp %g1, %g7
4053+ bne,pn %xcc, 2f
4054+ add %g7, %o0, %g7
4055+ mov %g7, %o0
4056+ retl
4057+ nop
4058+2: BACKOFF_SPIN(%o2, %o3, 1b)
4059+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4060+
4061 .globl atomic64_sub_ret
4062 .type atomic64_sub_ret,#function
4063 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4064 BACKOFF_SETUP(%o2)
4065 1: ldx [%o1], %g1
4066- sub %g1, %o0, %g7
4067+ subcc %g1, %o0, %g7
4068+
4069+#ifdef CONFIG_PAX_REFCOUNT
4070+ tvs %xcc, 6
4071+#endif
4072+
4073 casx [%o1], %g1, %g7
4074 cmp %g1, %g7
4075 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4076diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4077--- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4078+++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-05 19:44:33.000000000 -0400
4079@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4080
4081 /* Atomic counter implementation. */
4082 EXPORT_SYMBOL(atomic_add);
4083+EXPORT_SYMBOL(atomic_add_unchecked);
4084 EXPORT_SYMBOL(atomic_add_ret);
4085 EXPORT_SYMBOL(atomic_sub);
4086+EXPORT_SYMBOL(atomic_sub_unchecked);
4087 EXPORT_SYMBOL(atomic_sub_ret);
4088 EXPORT_SYMBOL(atomic64_add);
4089+EXPORT_SYMBOL(atomic64_add_unchecked);
4090 EXPORT_SYMBOL(atomic64_add_ret);
4091+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4092 EXPORT_SYMBOL(atomic64_sub);
4093+EXPORT_SYMBOL(atomic64_sub_unchecked);
4094 EXPORT_SYMBOL(atomic64_sub_ret);
4095
4096 /* Atomic bit operations. */
4097diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4098--- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4099+++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4100@@ -2,7 +2,7 @@
4101 #
4102
4103 asflags-y := -ansi -DST_DIV0=0x02
4104-ccflags-y := -Werror
4105+#ccflags-y := -Werror
4106
4107 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4108 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4109diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4110--- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4111+++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4112@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4113 # Export what is needed by arch/sparc/boot/Makefile
4114 export VMLINUX_INIT VMLINUX_MAIN
4115 VMLINUX_INIT := $(head-y) $(init-y)
4116-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4117+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4118 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4119 VMLINUX_MAIN += $(drivers-y) $(net-y)
4120
4121diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4122--- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4123+++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4124@@ -22,6 +22,9 @@
4125 #include <linux/interrupt.h>
4126 #include <linux/module.h>
4127 #include <linux/kdebug.h>
4128+#include <linux/slab.h>
4129+#include <linux/pagemap.h>
4130+#include <linux/compiler.h>
4131
4132 #include <asm/system.h>
4133 #include <asm/page.h>
4134@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4135 return safe_compute_effective_address(regs, insn);
4136 }
4137
4138+#ifdef CONFIG_PAX_PAGEEXEC
4139+#ifdef CONFIG_PAX_DLRESOLVE
4140+static void pax_emuplt_close(struct vm_area_struct *vma)
4141+{
4142+ vma->vm_mm->call_dl_resolve = 0UL;
4143+}
4144+
4145+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4146+{
4147+ unsigned int *kaddr;
4148+
4149+ vmf->page = alloc_page(GFP_HIGHUSER);
4150+ if (!vmf->page)
4151+ return VM_FAULT_OOM;
4152+
4153+ kaddr = kmap(vmf->page);
4154+ memset(kaddr, 0, PAGE_SIZE);
4155+ kaddr[0] = 0x9DE3BFA8U; /* save */
4156+ flush_dcache_page(vmf->page);
4157+ kunmap(vmf->page);
4158+ return VM_FAULT_MAJOR;
4159+}
4160+
4161+static const struct vm_operations_struct pax_vm_ops = {
4162+ .close = pax_emuplt_close,
4163+ .fault = pax_emuplt_fault
4164+};
4165+
4166+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4167+{
4168+ int ret;
4169+
4170+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4171+ vma->vm_mm = current->mm;
4172+ vma->vm_start = addr;
4173+ vma->vm_end = addr + PAGE_SIZE;
4174+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4175+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4176+ vma->vm_ops = &pax_vm_ops;
4177+
4178+ ret = insert_vm_struct(current->mm, vma);
4179+ if (ret)
4180+ return ret;
4181+
4182+ ++current->mm->total_vm;
4183+ return 0;
4184+}
4185+#endif
4186+
4187+/*
4188+ * PaX: decide what to do with offenders (regs->pc = fault address)
4189+ *
4190+ * returns 1 when task should be killed
4191+ * 2 when patched PLT trampoline was detected
4192+ * 3 when unpatched PLT trampoline was detected
4193+ */
4194+static int pax_handle_fetch_fault(struct pt_regs *regs)
4195+{
4196+
4197+#ifdef CONFIG_PAX_EMUPLT
4198+ int err;
4199+
4200+ do { /* PaX: patched PLT emulation #1 */
4201+ unsigned int sethi1, sethi2, jmpl;
4202+
4203+ err = get_user(sethi1, (unsigned int *)regs->pc);
4204+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4205+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4206+
4207+ if (err)
4208+ break;
4209+
4210+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4211+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4212+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4213+ {
4214+ unsigned int addr;
4215+
4216+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4217+ addr = regs->u_regs[UREG_G1];
4218+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4219+ regs->pc = addr;
4220+ regs->npc = addr+4;
4221+ return 2;
4222+ }
4223+ } while (0);
4224+
4225+ { /* PaX: patched PLT emulation #2 */
4226+ unsigned int ba;
4227+
4228+ err = get_user(ba, (unsigned int *)regs->pc);
4229+
4230+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4231+ unsigned int addr;
4232+
4233+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4234+ regs->pc = addr;
4235+ regs->npc = addr+4;
4236+ return 2;
4237+ }
4238+ }
4239+
4240+ do { /* PaX: patched PLT emulation #3 */
4241+ unsigned int sethi, jmpl, nop;
4242+
4243+ err = get_user(sethi, (unsigned int *)regs->pc);
4244+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4245+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4246+
4247+ if (err)
4248+ break;
4249+
4250+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4251+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4252+ nop == 0x01000000U)
4253+ {
4254+ unsigned int addr;
4255+
4256+ addr = (sethi & 0x003FFFFFU) << 10;
4257+ regs->u_regs[UREG_G1] = addr;
4258+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4259+ regs->pc = addr;
4260+ regs->npc = addr+4;
4261+ return 2;
4262+ }
4263+ } while (0);
4264+
4265+ do { /* PaX: unpatched PLT emulation step 1 */
4266+ unsigned int sethi, ba, nop;
4267+
4268+ err = get_user(sethi, (unsigned int *)regs->pc);
4269+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4270+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4271+
4272+ if (err)
4273+ break;
4274+
4275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4276+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4277+ nop == 0x01000000U)
4278+ {
4279+ unsigned int addr, save, call;
4280+
4281+ if ((ba & 0xFFC00000U) == 0x30800000U)
4282+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4283+ else
4284+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4285+
4286+ err = get_user(save, (unsigned int *)addr);
4287+ err |= get_user(call, (unsigned int *)(addr+4));
4288+ err |= get_user(nop, (unsigned int *)(addr+8));
4289+ if (err)
4290+ break;
4291+
4292+#ifdef CONFIG_PAX_DLRESOLVE
4293+ if (save == 0x9DE3BFA8U &&
4294+ (call & 0xC0000000U) == 0x40000000U &&
4295+ nop == 0x01000000U)
4296+ {
4297+ struct vm_area_struct *vma;
4298+ unsigned long call_dl_resolve;
4299+
4300+ down_read(&current->mm->mmap_sem);
4301+ call_dl_resolve = current->mm->call_dl_resolve;
4302+ up_read(&current->mm->mmap_sem);
4303+ if (likely(call_dl_resolve))
4304+ goto emulate;
4305+
4306+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4307+
4308+ down_write(&current->mm->mmap_sem);
4309+ if (current->mm->call_dl_resolve) {
4310+ call_dl_resolve = current->mm->call_dl_resolve;
4311+ up_write(&current->mm->mmap_sem);
4312+ if (vma)
4313+ kmem_cache_free(vm_area_cachep, vma);
4314+ goto emulate;
4315+ }
4316+
4317+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4318+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4319+ up_write(&current->mm->mmap_sem);
4320+ if (vma)
4321+ kmem_cache_free(vm_area_cachep, vma);
4322+ return 1;
4323+ }
4324+
4325+ if (pax_insert_vma(vma, call_dl_resolve)) {
4326+ up_write(&current->mm->mmap_sem);
4327+ kmem_cache_free(vm_area_cachep, vma);
4328+ return 1;
4329+ }
4330+
4331+ current->mm->call_dl_resolve = call_dl_resolve;
4332+ up_write(&current->mm->mmap_sem);
4333+
4334+emulate:
4335+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4336+ regs->pc = call_dl_resolve;
4337+ regs->npc = addr+4;
4338+ return 3;
4339+ }
4340+#endif
4341+
4342+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4343+ if ((save & 0xFFC00000U) == 0x05000000U &&
4344+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4345+ nop == 0x01000000U)
4346+ {
4347+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4348+ regs->u_regs[UREG_G2] = addr + 4;
4349+ addr = (save & 0x003FFFFFU) << 10;
4350+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4351+ regs->pc = addr;
4352+ regs->npc = addr+4;
4353+ return 3;
4354+ }
4355+ }
4356+ } while (0);
4357+
4358+ do { /* PaX: unpatched PLT emulation step 2 */
4359+ unsigned int save, call, nop;
4360+
4361+ err = get_user(save, (unsigned int *)(regs->pc-4));
4362+ err |= get_user(call, (unsigned int *)regs->pc);
4363+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4364+ if (err)
4365+ break;
4366+
4367+ if (save == 0x9DE3BFA8U &&
4368+ (call & 0xC0000000U) == 0x40000000U &&
4369+ nop == 0x01000000U)
4370+ {
4371+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4372+
4373+ regs->u_regs[UREG_RETPC] = regs->pc;
4374+ regs->pc = dl_resolve;
4375+ regs->npc = dl_resolve+4;
4376+ return 3;
4377+ }
4378+ } while (0);
4379+#endif
4380+
4381+ return 1;
4382+}
4383+
4384+void pax_report_insns(void *pc, void *sp)
4385+{
4386+ unsigned long i;
4387+
4388+ printk(KERN_ERR "PAX: bytes at PC: ");
4389+ for (i = 0; i < 8; i++) {
4390+ unsigned int c;
4391+ if (get_user(c, (unsigned int *)pc+i))
4392+ printk(KERN_CONT "???????? ");
4393+ else
4394+ printk(KERN_CONT "%08x ", c);
4395+ }
4396+ printk("\n");
4397+}
4398+#endif
4399+
4400 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4401 int text_fault)
4402 {
4403@@ -281,6 +546,24 @@ good_area:
4404 if(!(vma->vm_flags & VM_WRITE))
4405 goto bad_area;
4406 } else {
4407+
4408+#ifdef CONFIG_PAX_PAGEEXEC
4409+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4410+ up_read(&mm->mmap_sem);
4411+ switch (pax_handle_fetch_fault(regs)) {
4412+
4413+#ifdef CONFIG_PAX_EMUPLT
4414+ case 2:
4415+ case 3:
4416+ return;
4417+#endif
4418+
4419+ }
4420+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4421+ do_group_exit(SIGKILL);
4422+ }
4423+#endif
4424+
4425 /* Allow reads even for write-only mappings */
4426 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4427 goto bad_area;
4428diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4429--- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4430+++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4431@@ -21,6 +21,9 @@
4432 #include <linux/kprobes.h>
4433 #include <linux/kdebug.h>
4434 #include <linux/percpu.h>
4435+#include <linux/slab.h>
4436+#include <linux/pagemap.h>
4437+#include <linux/compiler.h>
4438
4439 #include <asm/page.h>
4440 #include <asm/pgtable.h>
4441@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4442 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4443 regs->tpc);
4444 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4445- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4446+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4447 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4448 dump_stack();
4449 unhandled_fault(regs->tpc, current, regs);
4450@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4451 show_regs(regs);
4452 }
4453
4454+#ifdef CONFIG_PAX_PAGEEXEC
4455+#ifdef CONFIG_PAX_DLRESOLVE
4456+static void pax_emuplt_close(struct vm_area_struct *vma)
4457+{
4458+ vma->vm_mm->call_dl_resolve = 0UL;
4459+}
4460+
4461+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4462+{
4463+ unsigned int *kaddr;
4464+
4465+ vmf->page = alloc_page(GFP_HIGHUSER);
4466+ if (!vmf->page)
4467+ return VM_FAULT_OOM;
4468+
4469+ kaddr = kmap(vmf->page);
4470+ memset(kaddr, 0, PAGE_SIZE);
4471+ kaddr[0] = 0x9DE3BFA8U; /* save */
4472+ flush_dcache_page(vmf->page);
4473+ kunmap(vmf->page);
4474+ return VM_FAULT_MAJOR;
4475+}
4476+
4477+static const struct vm_operations_struct pax_vm_ops = {
4478+ .close = pax_emuplt_close,
4479+ .fault = pax_emuplt_fault
4480+};
4481+
4482+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4483+{
4484+ int ret;
4485+
4486+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4487+ vma->vm_mm = current->mm;
4488+ vma->vm_start = addr;
4489+ vma->vm_end = addr + PAGE_SIZE;
4490+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4491+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4492+ vma->vm_ops = &pax_vm_ops;
4493+
4494+ ret = insert_vm_struct(current->mm, vma);
4495+ if (ret)
4496+ return ret;
4497+
4498+ ++current->mm->total_vm;
4499+ return 0;
4500+}
4501+#endif
4502+
4503+/*
4504+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4505+ *
4506+ * returns 1 when task should be killed
4507+ * 2 when patched PLT trampoline was detected
4508+ * 3 when unpatched PLT trampoline was detected
4509+ */
4510+static int pax_handle_fetch_fault(struct pt_regs *regs)
4511+{
4512+
4513+#ifdef CONFIG_PAX_EMUPLT
4514+ int err;
4515+
4516+ do { /* PaX: patched PLT emulation #1 */
4517+ unsigned int sethi1, sethi2, jmpl;
4518+
4519+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4520+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4521+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4522+
4523+ if (err)
4524+ break;
4525+
4526+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4527+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4528+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4529+ {
4530+ unsigned long addr;
4531+
4532+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4533+ addr = regs->u_regs[UREG_G1];
4534+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4535+
4536+ if (test_thread_flag(TIF_32BIT))
4537+ addr &= 0xFFFFFFFFUL;
4538+
4539+ regs->tpc = addr;
4540+ regs->tnpc = addr+4;
4541+ return 2;
4542+ }
4543+ } while (0);
4544+
4545+ { /* PaX: patched PLT emulation #2 */
4546+ unsigned int ba;
4547+
4548+ err = get_user(ba, (unsigned int *)regs->tpc);
4549+
4550+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4551+ unsigned long addr;
4552+
4553+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4554+
4555+ if (test_thread_flag(TIF_32BIT))
4556+ addr &= 0xFFFFFFFFUL;
4557+
4558+ regs->tpc = addr;
4559+ regs->tnpc = addr+4;
4560+ return 2;
4561+ }
4562+ }
4563+
4564+ do { /* PaX: patched PLT emulation #3 */
4565+ unsigned int sethi, jmpl, nop;
4566+
4567+ err = get_user(sethi, (unsigned int *)regs->tpc);
4568+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4569+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4576+ nop == 0x01000000U)
4577+ {
4578+ unsigned long addr;
4579+
4580+ addr = (sethi & 0x003FFFFFU) << 10;
4581+ regs->u_regs[UREG_G1] = addr;
4582+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #4 */
4594+ unsigned int sethi, mov1, call, mov2;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4600+
4601+ if (err)
4602+ break;
4603+
4604+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4605+ mov1 == 0x8210000FU &&
4606+ (call & 0xC0000000U) == 0x40000000U &&
4607+ mov2 == 0x9E100001U)
4608+ {
4609+ unsigned long addr;
4610+
4611+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4612+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4613+
4614+ if (test_thread_flag(TIF_32BIT))
4615+ addr &= 0xFFFFFFFFUL;
4616+
4617+ regs->tpc = addr;
4618+ regs->tnpc = addr+4;
4619+ return 2;
4620+ }
4621+ } while (0);
4622+
4623+ do { /* PaX: patched PLT emulation #5 */
4624+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4625+
4626+ err = get_user(sethi, (unsigned int *)regs->tpc);
4627+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4628+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4629+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4630+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4631+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4632+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4633+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4634+
4635+ if (err)
4636+ break;
4637+
4638+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4639+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4640+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4641+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4642+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4643+ sllx == 0x83287020U &&
4644+ jmpl == 0x81C04005U &&
4645+ nop == 0x01000000U)
4646+ {
4647+ unsigned long addr;
4648+
4649+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4650+ regs->u_regs[UREG_G1] <<= 32;
4651+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4652+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4653+ regs->tpc = addr;
4654+ regs->tnpc = addr+4;
4655+ return 2;
4656+ }
4657+ } while (0);
4658+
4659+ do { /* PaX: patched PLT emulation #6 */
4660+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4661+
4662+ err = get_user(sethi, (unsigned int *)regs->tpc);
4663+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4664+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4665+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4666+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4667+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4675+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4676+ sllx == 0x83287020U &&
4677+ (or & 0xFFFFE000U) == 0x8A116000U &&
4678+ jmpl == 0x81C04005U &&
4679+ nop == 0x01000000U)
4680+ {
4681+ unsigned long addr;
4682+
4683+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4684+ regs->u_regs[UREG_G1] <<= 32;
4685+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4686+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4687+ regs->tpc = addr;
4688+ regs->tnpc = addr+4;
4689+ return 2;
4690+ }
4691+ } while (0);
4692+
4693+ do { /* PaX: unpatched PLT emulation step 1 */
4694+ unsigned int sethi, ba, nop;
4695+
4696+ err = get_user(sethi, (unsigned int *)regs->tpc);
4697+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4698+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4699+
4700+ if (err)
4701+ break;
4702+
4703+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4704+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4705+ nop == 0x01000000U)
4706+ {
4707+ unsigned long addr;
4708+ unsigned int save, call;
4709+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4710+
4711+ if ((ba & 0xFFC00000U) == 0x30800000U)
4712+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4713+ else
4714+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4715+
4716+ if (test_thread_flag(TIF_32BIT))
4717+ addr &= 0xFFFFFFFFUL;
4718+
4719+ err = get_user(save, (unsigned int *)addr);
4720+ err |= get_user(call, (unsigned int *)(addr+4));
4721+ err |= get_user(nop, (unsigned int *)(addr+8));
4722+ if (err)
4723+ break;
4724+
4725+#ifdef CONFIG_PAX_DLRESOLVE
4726+ if (save == 0x9DE3BFA8U &&
4727+ (call & 0xC0000000U) == 0x40000000U &&
4728+ nop == 0x01000000U)
4729+ {
4730+ struct vm_area_struct *vma;
4731+ unsigned long call_dl_resolve;
4732+
4733+ down_read(&current->mm->mmap_sem);
4734+ call_dl_resolve = current->mm->call_dl_resolve;
4735+ up_read(&current->mm->mmap_sem);
4736+ if (likely(call_dl_resolve))
4737+ goto emulate;
4738+
4739+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4740+
4741+ down_write(&current->mm->mmap_sem);
4742+ if (current->mm->call_dl_resolve) {
4743+ call_dl_resolve = current->mm->call_dl_resolve;
4744+ up_write(&current->mm->mmap_sem);
4745+ if (vma)
4746+ kmem_cache_free(vm_area_cachep, vma);
4747+ goto emulate;
4748+ }
4749+
4750+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4751+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4752+ up_write(&current->mm->mmap_sem);
4753+ if (vma)
4754+ kmem_cache_free(vm_area_cachep, vma);
4755+ return 1;
4756+ }
4757+
4758+ if (pax_insert_vma(vma, call_dl_resolve)) {
4759+ up_write(&current->mm->mmap_sem);
4760+ kmem_cache_free(vm_area_cachep, vma);
4761+ return 1;
4762+ }
4763+
4764+ current->mm->call_dl_resolve = call_dl_resolve;
4765+ up_write(&current->mm->mmap_sem);
4766+
4767+emulate:
4768+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4769+ regs->tpc = call_dl_resolve;
4770+ regs->tnpc = addr+4;
4771+ return 3;
4772+ }
4773+#endif
4774+
4775+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4776+ if ((save & 0xFFC00000U) == 0x05000000U &&
4777+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4778+ nop == 0x01000000U)
4779+ {
4780+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4781+ regs->u_regs[UREG_G2] = addr + 4;
4782+ addr = (save & 0x003FFFFFU) << 10;
4783+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4784+
4785+ if (test_thread_flag(TIF_32BIT))
4786+ addr &= 0xFFFFFFFFUL;
4787+
4788+ regs->tpc = addr;
4789+ regs->tnpc = addr+4;
4790+ return 3;
4791+ }
4792+
4793+ /* PaX: 64-bit PLT stub */
4794+ err = get_user(sethi1, (unsigned int *)addr);
4795+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4796+ err |= get_user(or1, (unsigned int *)(addr+8));
4797+ err |= get_user(or2, (unsigned int *)(addr+12));
4798+ err |= get_user(sllx, (unsigned int *)(addr+16));
4799+ err |= get_user(add, (unsigned int *)(addr+20));
4800+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4801+ err |= get_user(nop, (unsigned int *)(addr+28));
4802+ if (err)
4803+ break;
4804+
4805+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4806+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4807+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4808+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4809+ sllx == 0x89293020U &&
4810+ add == 0x8A010005U &&
4811+ jmpl == 0x89C14000U &&
4812+ nop == 0x01000000U)
4813+ {
4814+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4815+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4816+ regs->u_regs[UREG_G4] <<= 32;
4817+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4818+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4819+ regs->u_regs[UREG_G4] = addr + 24;
4820+ addr = regs->u_regs[UREG_G5];
4821+ regs->tpc = addr;
4822+ regs->tnpc = addr+4;
4823+ return 3;
4824+ }
4825+ }
4826+ } while (0);
4827+
4828+#ifdef CONFIG_PAX_DLRESOLVE
4829+ do { /* PaX: unpatched PLT emulation step 2 */
4830+ unsigned int save, call, nop;
4831+
4832+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4833+ err |= get_user(call, (unsigned int *)regs->tpc);
4834+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4835+ if (err)
4836+ break;
4837+
4838+ if (save == 0x9DE3BFA8U &&
4839+ (call & 0xC0000000U) == 0x40000000U &&
4840+ nop == 0x01000000U)
4841+ {
4842+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4843+
4844+ if (test_thread_flag(TIF_32BIT))
4845+ dl_resolve &= 0xFFFFFFFFUL;
4846+
4847+ regs->u_regs[UREG_RETPC] = regs->tpc;
4848+ regs->tpc = dl_resolve;
4849+ regs->tnpc = dl_resolve+4;
4850+ return 3;
4851+ }
4852+ } while (0);
4853+#endif
4854+
4855+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4856+ unsigned int sethi, ba, nop;
4857+
4858+ err = get_user(sethi, (unsigned int *)regs->tpc);
4859+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4860+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4861+
4862+ if (err)
4863+ break;
4864+
4865+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4866+ (ba & 0xFFF00000U) == 0x30600000U &&
4867+ nop == 0x01000000U)
4868+ {
4869+ unsigned long addr;
4870+
4871+ addr = (sethi & 0x003FFFFFU) << 10;
4872+ regs->u_regs[UREG_G1] = addr;
4873+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4874+
4875+ if (test_thread_flag(TIF_32BIT))
4876+ addr &= 0xFFFFFFFFUL;
4877+
4878+ regs->tpc = addr;
4879+ regs->tnpc = addr+4;
4880+ return 2;
4881+ }
4882+ } while (0);
4883+
4884+#endif
4885+
4886+ return 1;
4887+}
4888+
4889+void pax_report_insns(void *pc, void *sp)
4890+{
4891+ unsigned long i;
4892+
4893+ printk(KERN_ERR "PAX: bytes at PC: ");
4894+ for (i = 0; i < 8; i++) {
4895+ unsigned int c;
4896+ if (get_user(c, (unsigned int *)pc+i))
4897+ printk(KERN_CONT "???????? ");
4898+ else
4899+ printk(KERN_CONT "%08x ", c);
4900+ }
4901+ printk("\n");
4902+}
4903+#endif
4904+
4905 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4906 {
4907 struct mm_struct *mm = current->mm;
4908@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4909 if (!vma)
4910 goto bad_area;
4911
4912+#ifdef CONFIG_PAX_PAGEEXEC
4913+ /* PaX: detect ITLB misses on non-exec pages */
4914+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4915+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4916+ {
4917+ if (address != regs->tpc)
4918+ goto good_area;
4919+
4920+ up_read(&mm->mmap_sem);
4921+ switch (pax_handle_fetch_fault(regs)) {
4922+
4923+#ifdef CONFIG_PAX_EMUPLT
4924+ case 2:
4925+ case 3:
4926+ return;
4927+#endif
4928+
4929+ }
4930+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4931+ do_group_exit(SIGKILL);
4932+ }
4933+#endif
4934+
4935 /* Pure DTLB misses do not tell us whether the fault causing
4936 * load/store/atomic was a write or not, it only says that there
4937 * was no match. So in such a case we (carefully) read the
4938diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4939--- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4940+++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4941@@ -68,7 +68,7 @@ full_search:
4942 }
4943 return -ENOMEM;
4944 }
4945- if (likely(!vma || addr + len <= vma->vm_start)) {
4946+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4947 /*
4948 * Remember the place where we stopped the search:
4949 */
4950@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4951 /* make sure it can fit in the remaining address space */
4952 if (likely(addr > len)) {
4953 vma = find_vma(mm, addr-len);
4954- if (!vma || addr <= vma->vm_start) {
4955+ if (check_heap_stack_gap(vma, addr - len, len)) {
4956 /* remember the address as a hint for next time */
4957 return (mm->free_area_cache = addr-len);
4958 }
4959@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4960 if (unlikely(mm->mmap_base < len))
4961 goto bottomup;
4962
4963- addr = (mm->mmap_base-len) & HPAGE_MASK;
4964+ addr = mm->mmap_base - len;
4965
4966 do {
4967+ addr &= HPAGE_MASK;
4968 /*
4969 * Lookup failure means no vma is above this address,
4970 * else if new region fits below vma->vm_start,
4971 * return with success:
4972 */
4973 vma = find_vma(mm, addr);
4974- if (likely(!vma || addr+len <= vma->vm_start)) {
4975+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4976 /* remember the address as a hint for next time */
4977 return (mm->free_area_cache = addr);
4978 }
4979@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4980 mm->cached_hole_size = vma->vm_start - addr;
4981
4982 /* try just below the current vma->vm_start */
4983- addr = (vma->vm_start-len) & HPAGE_MASK;
4984- } while (likely(len < vma->vm_start));
4985+ addr = skip_heap_stack_gap(vma, len);
4986+ } while (!IS_ERR_VALUE(addr));
4987
4988 bottomup:
4989 /*
4990@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4991 if (addr) {
4992 addr = ALIGN(addr, HPAGE_SIZE);
4993 vma = find_vma(mm, addr);
4994- if (task_size - len >= addr &&
4995- (!vma || addr + len <= vma->vm_start))
4996+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4997 return addr;
4998 }
4999 if (mm->get_unmapped_area == arch_get_unmapped_area)
5000diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5001--- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5002+++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5003@@ -318,6 +318,9 @@ extern void device_scan(void);
5004 pgprot_t PAGE_SHARED __read_mostly;
5005 EXPORT_SYMBOL(PAGE_SHARED);
5006
5007+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5008+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5009+
5010 void __init paging_init(void)
5011 {
5012 switch(sparc_cpu_model) {
5013@@ -346,17 +349,17 @@ void __init paging_init(void)
5014
5015 /* Initialize the protection map with non-constant, MMU dependent values. */
5016 protection_map[0] = PAGE_NONE;
5017- protection_map[1] = PAGE_READONLY;
5018- protection_map[2] = PAGE_COPY;
5019- protection_map[3] = PAGE_COPY;
5020+ protection_map[1] = PAGE_READONLY_NOEXEC;
5021+ protection_map[2] = PAGE_COPY_NOEXEC;
5022+ protection_map[3] = PAGE_COPY_NOEXEC;
5023 protection_map[4] = PAGE_READONLY;
5024 protection_map[5] = PAGE_READONLY;
5025 protection_map[6] = PAGE_COPY;
5026 protection_map[7] = PAGE_COPY;
5027 protection_map[8] = PAGE_NONE;
5028- protection_map[9] = PAGE_READONLY;
5029- protection_map[10] = PAGE_SHARED;
5030- protection_map[11] = PAGE_SHARED;
5031+ protection_map[9] = PAGE_READONLY_NOEXEC;
5032+ protection_map[10] = PAGE_SHARED_NOEXEC;
5033+ protection_map[11] = PAGE_SHARED_NOEXEC;
5034 protection_map[12] = PAGE_READONLY;
5035 protection_map[13] = PAGE_READONLY;
5036 protection_map[14] = PAGE_SHARED;
5037diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5038--- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5039+++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5040@@ -2,7 +2,7 @@
5041 #
5042
5043 asflags-y := -ansi
5044-ccflags-y := -Werror
5045+#ccflags-y := -Werror
5046
5047 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5048 obj-y += fault_$(BITS).o
5049diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5050--- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5051+++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5052@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5053 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5054 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5055 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5056+
5057+#ifdef CONFIG_PAX_PAGEEXEC
5058+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5059+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5060+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5061+#endif
5062+
5063 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5064 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5065
5066diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5067--- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5068+++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5069@@ -23,6 +23,7 @@ enum km_type {
5070 KM_IRQ1,
5071 KM_SOFTIRQ0,
5072 KM_SOFTIRQ1,
5073+ KM_CLEARPAGE,
5074 KM_TYPE_NR
5075 };
5076
5077diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5078--- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5079+++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5080@@ -14,6 +14,9 @@
5081 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5082 #define PAGE_MASK (~(PAGE_SIZE-1))
5083
5084+#define ktla_ktva(addr) (addr)
5085+#define ktva_ktla(addr) (addr)
5086+
5087 #ifndef __ASSEMBLY__
5088
5089 struct page;
5090diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5091--- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5092+++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5093@@ -404,22 +404,6 @@ int singlestepping(void * t)
5094 return 2;
5095 }
5096
5097-/*
5098- * Only x86 and x86_64 have an arch_align_stack().
5099- * All other arches have "#define arch_align_stack(x) (x)"
5100- * in their asm/system.h
5101- * As this is included in UML from asm-um/system-generic.h,
5102- * we can use it to behave as the subarch does.
5103- */
5104-#ifndef arch_align_stack
5105-unsigned long arch_align_stack(unsigned long sp)
5106-{
5107- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5108- sp -= get_random_int() % 8192;
5109- return sp & ~0xf;
5110-}
5111-#endif
5112-
5113 unsigned long get_wchan(struct task_struct *p)
5114 {
5115 unsigned long stack_page, sp, ip;
5116diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5117--- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5118+++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5119@@ -11,6 +11,21 @@
5120 #include "asm/uaccess.h"
5121 #include "asm/unistd.h"
5122
5123+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5124+{
5125+ unsigned long pax_task_size = TASK_SIZE;
5126+
5127+#ifdef CONFIG_PAX_SEGMEXEC
5128+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5129+ pax_task_size = SEGMEXEC_TASK_SIZE;
5130+#endif
5131+
5132+ if (len > pax_task_size || addr > pax_task_size - len)
5133+ return -EINVAL;
5134+
5135+ return 0;
5136+}
5137+
5138 /*
5139 * The prototype on i386 is:
5140 *
5141diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5142--- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5143+++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5144@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5145 u8 v;
5146 const u32 *p = (const u32 *)addr;
5147
5148- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5149+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5150 return v;
5151 }
5152
5153@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5154
5155 static inline void set_bit(int nr, void *addr)
5156 {
5157- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5158+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5159 }
5160
5161 #endif /* BOOT_BITOPS_H */
5162diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5163--- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5164+++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5165@@ -85,7 +85,7 @@ static inline void io_delay(void)
5166 static inline u16 ds(void)
5167 {
5168 u16 seg;
5169- asm("movw %%ds,%0" : "=rm" (seg));
5170+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5171 return seg;
5172 }
5173
5174@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5175 static inline int memcmp(const void *s1, const void *s2, size_t len)
5176 {
5177 u8 diff;
5178- asm("repe; cmpsb; setnz %0"
5179+ asm volatile("repe; cmpsb; setnz %0"
5180 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5181 return diff;
5182 }
5183diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5184--- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5185+++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5186@@ -76,7 +76,7 @@ ENTRY(startup_32)
5187 notl %eax
5188 andl %eax, %ebx
5189 #else
5190- movl $LOAD_PHYSICAL_ADDR, %ebx
5191+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5192 #endif
5193
5194 /* Target address to relocate to for decompression */
5195@@ -162,7 +162,7 @@ relocated:
5196 * and where it was actually loaded.
5197 */
5198 movl %ebp, %ebx
5199- subl $LOAD_PHYSICAL_ADDR, %ebx
5200+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5201 jz 2f /* Nothing to be done if loaded at compiled addr. */
5202 /*
5203 * Process relocations.
5204@@ -170,8 +170,7 @@ relocated:
5205
5206 1: subl $4, %edi
5207 movl (%edi), %ecx
5208- testl %ecx, %ecx
5209- jz 2f
5210+ jecxz 2f
5211 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5212 jmp 1b
5213 2:
5214diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5215--- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5216+++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5217@@ -91,7 +91,7 @@ ENTRY(startup_32)
5218 notl %eax
5219 andl %eax, %ebx
5220 #else
5221- movl $LOAD_PHYSICAL_ADDR, %ebx
5222+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5223 #endif
5224
5225 /* Target address to relocate to for decompression */
5226@@ -233,7 +233,7 @@ ENTRY(startup_64)
5227 notq %rax
5228 andq %rax, %rbp
5229 #else
5230- movq $LOAD_PHYSICAL_ADDR, %rbp
5231+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5232 #endif
5233
5234 /* Target address to relocate to for decompression */
5235diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5236--- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5237+++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5238@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5239 KBUILD_CFLAGS += $(cflags-y)
5240 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5241 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5242+ifdef CONSTIFY_PLUGIN
5243+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5244+endif
5245
5246 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5247 GCOV_PROFILE := n
5248diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5249--- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5250+++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5251@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5252 case PT_LOAD:
5253 #ifdef CONFIG_RELOCATABLE
5254 dest = output;
5255- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5256+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5257 #else
5258 dest = (void *)(phdr->p_paddr);
5259 #endif
5260@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5261 error("Destination address too large");
5262 #endif
5263 #ifndef CONFIG_RELOCATABLE
5264- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5265+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5266 error("Wrong destination address");
5267 #endif
5268
5269diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5270--- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5271+++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5272@@ -13,8 +13,11 @@
5273
5274 static void die(char *fmt, ...);
5275
5276+#include "../../../../include/generated/autoconf.h"
5277+
5278 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5279 static Elf32_Ehdr ehdr;
5280+static Elf32_Phdr *phdr;
5281 static unsigned long reloc_count, reloc_idx;
5282 static unsigned long *relocs;
5283
5284@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5285 }
5286 }
5287
5288+static void read_phdrs(FILE *fp)
5289+{
5290+ unsigned int i;
5291+
5292+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5293+ if (!phdr) {
5294+ die("Unable to allocate %d program headers\n",
5295+ ehdr.e_phnum);
5296+ }
5297+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5298+ die("Seek to %d failed: %s\n",
5299+ ehdr.e_phoff, strerror(errno));
5300+ }
5301+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5302+ die("Cannot read ELF program headers: %s\n",
5303+ strerror(errno));
5304+ }
5305+ for(i = 0; i < ehdr.e_phnum; i++) {
5306+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5307+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5308+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5309+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5310+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5311+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5312+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5313+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5314+ }
5315+
5316+}
5317+
5318 static void read_shdrs(FILE *fp)
5319 {
5320- int i;
5321+ unsigned int i;
5322 Elf32_Shdr shdr;
5323
5324 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5325@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5326
5327 static void read_strtabs(FILE *fp)
5328 {
5329- int i;
5330+ unsigned int i;
5331 for (i = 0; i < ehdr.e_shnum; i++) {
5332 struct section *sec = &secs[i];
5333 if (sec->shdr.sh_type != SHT_STRTAB) {
5334@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5335
5336 static void read_symtabs(FILE *fp)
5337 {
5338- int i,j;
5339+ unsigned int i,j;
5340 for (i = 0; i < ehdr.e_shnum; i++) {
5341 struct section *sec = &secs[i];
5342 if (sec->shdr.sh_type != SHT_SYMTAB) {
5343@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5344
5345 static void read_relocs(FILE *fp)
5346 {
5347- int i,j;
5348+ unsigned int i,j;
5349+ uint32_t base;
5350+
5351 for (i = 0; i < ehdr.e_shnum; i++) {
5352 struct section *sec = &secs[i];
5353 if (sec->shdr.sh_type != SHT_REL) {
5354@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5355 die("Cannot read symbol table: %s\n",
5356 strerror(errno));
5357 }
5358+ base = 0;
5359+ for (j = 0; j < ehdr.e_phnum; j++) {
5360+ if (phdr[j].p_type != PT_LOAD )
5361+ continue;
5362+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5363+ continue;
5364+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5365+ break;
5366+ }
5367 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5368 Elf32_Rel *rel = &sec->reltab[j];
5369- rel->r_offset = elf32_to_cpu(rel->r_offset);
5370+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5371 rel->r_info = elf32_to_cpu(rel->r_info);
5372 }
5373 }
5374@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5375
5376 static void print_absolute_symbols(void)
5377 {
5378- int i;
5379+ unsigned int i;
5380 printf("Absolute symbols\n");
5381 printf(" Num: Value Size Type Bind Visibility Name\n");
5382 for (i = 0; i < ehdr.e_shnum; i++) {
5383 struct section *sec = &secs[i];
5384 char *sym_strtab;
5385 Elf32_Sym *sh_symtab;
5386- int j;
5387+ unsigned int j;
5388
5389 if (sec->shdr.sh_type != SHT_SYMTAB) {
5390 continue;
5391@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5392
5393 static void print_absolute_relocs(void)
5394 {
5395- int i, printed = 0;
5396+ unsigned int i, printed = 0;
5397
5398 for (i = 0; i < ehdr.e_shnum; i++) {
5399 struct section *sec = &secs[i];
5400 struct section *sec_applies, *sec_symtab;
5401 char *sym_strtab;
5402 Elf32_Sym *sh_symtab;
5403- int j;
5404+ unsigned int j;
5405 if (sec->shdr.sh_type != SHT_REL) {
5406 continue;
5407 }
5408@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5409
5410 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5411 {
5412- int i;
5413+ unsigned int i;
5414 /* Walk through the relocations */
5415 for (i = 0; i < ehdr.e_shnum; i++) {
5416 char *sym_strtab;
5417 Elf32_Sym *sh_symtab;
5418 struct section *sec_applies, *sec_symtab;
5419- int j;
5420+ unsigned int j;
5421 struct section *sec = &secs[i];
5422
5423 if (sec->shdr.sh_type != SHT_REL) {
5424@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5425 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5426 continue;
5427 }
5428+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5429+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5430+ continue;
5431+
5432+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5433+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5434+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5435+ continue;
5436+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5437+ continue;
5438+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5439+ continue;
5440+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5441+ continue;
5442+#endif
5443+
5444 switch (r_type) {
5445 case R_386_NONE:
5446 case R_386_PC32:
5447@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5448
5449 static void emit_relocs(int as_text)
5450 {
5451- int i;
5452+ unsigned int i;
5453 /* Count how many relocations I have and allocate space for them. */
5454 reloc_count = 0;
5455 walk_relocs(count_reloc);
5456@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5457 fname, strerror(errno));
5458 }
5459 read_ehdr(fp);
5460+ read_phdrs(fp);
5461 read_shdrs(fp);
5462 read_strtabs(fp);
5463 read_symtabs(fp);
5464diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5465--- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5466+++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5467@@ -74,7 +74,7 @@ static int has_fpu(void)
5468 u16 fcw = -1, fsw = -1;
5469 u32 cr0;
5470
5471- asm("movl %%cr0,%0" : "=r" (cr0));
5472+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5473 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5474 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5475 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5476@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5477 {
5478 u32 f0, f1;
5479
5480- asm("pushfl ; "
5481+ asm volatile("pushfl ; "
5482 "pushfl ; "
5483 "popl %0 ; "
5484 "movl %0,%1 ; "
5485@@ -115,7 +115,7 @@ static void get_flags(void)
5486 set_bit(X86_FEATURE_FPU, cpu.flags);
5487
5488 if (has_eflag(X86_EFLAGS_ID)) {
5489- asm("cpuid"
5490+ asm volatile("cpuid"
5491 : "=a" (max_intel_level),
5492 "=b" (cpu_vendor[0]),
5493 "=d" (cpu_vendor[1]),
5494@@ -124,7 +124,7 @@ static void get_flags(void)
5495
5496 if (max_intel_level >= 0x00000001 &&
5497 max_intel_level <= 0x0000ffff) {
5498- asm("cpuid"
5499+ asm volatile("cpuid"
5500 : "=a" (tfms),
5501 "=c" (cpu.flags[4]),
5502 "=d" (cpu.flags[0])
5503@@ -136,7 +136,7 @@ static void get_flags(void)
5504 cpu.model += ((tfms >> 16) & 0xf) << 4;
5505 }
5506
5507- asm("cpuid"
5508+ asm volatile("cpuid"
5509 : "=a" (max_amd_level)
5510 : "a" (0x80000000)
5511 : "ebx", "ecx", "edx");
5512@@ -144,7 +144,7 @@ static void get_flags(void)
5513 if (max_amd_level >= 0x80000001 &&
5514 max_amd_level <= 0x8000ffff) {
5515 u32 eax = 0x80000001;
5516- asm("cpuid"
5517+ asm volatile("cpuid"
5518 : "+a" (eax),
5519 "=c" (cpu.flags[6]),
5520 "=d" (cpu.flags[1])
5521@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5522 u32 ecx = MSR_K7_HWCR;
5523 u32 eax, edx;
5524
5525- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5526+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5527 eax &= ~(1 << 15);
5528- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5530
5531 get_flags(); /* Make sure it really did something */
5532 err = check_flags();
5533@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5534 u32 ecx = MSR_VIA_FCR;
5535 u32 eax, edx;
5536
5537- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5538+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5539 eax |= (1<<1)|(1<<7);
5540- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5541+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5542
5543 set_bit(X86_FEATURE_CX8, cpu.flags);
5544 err = check_flags();
5545@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5546 u32 eax, edx;
5547 u32 level = 1;
5548
5549- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5550- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5551- asm("cpuid"
5552+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5553+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5554+ asm volatile("cpuid"
5555 : "+a" (level), "=d" (cpu.flags[0])
5556 : : "ecx", "ebx");
5557- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5559
5560 err = check_flags();
5561 }
5562diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5563--- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5564+++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5565@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5566 # single linked list of
5567 # struct setup_data
5568
5569-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5570+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5571
5572 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5573 #define VO_INIT_SIZE (VO__end - VO__text)
5574diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5575--- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5576+++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5577@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5578 $(call cc-option, -fno-stack-protector) \
5579 $(call cc-option, -mpreferred-stack-boundary=2)
5580 KBUILD_CFLAGS += $(call cc-option, -m32)
5581+ifdef CONSTIFY_PLUGIN
5582+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5583+endif
5584 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5585 GCOV_PROFILE := n
5586
5587diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5588--- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5589+++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5590@@ -19,7 +19,7 @@
5591
5592 static int detect_memory_e820(void)
5593 {
5594- int count = 0;
5595+ unsigned int count = 0;
5596 struct biosregs ireg, oreg;
5597 struct e820entry *desc = boot_params.e820_map;
5598 static struct e820entry buf; /* static so it is zeroed */
5599diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5600--- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5601+++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5602@@ -96,7 +96,7 @@ static void store_mode_params(void)
5603 static unsigned int get_entry(void)
5604 {
5605 char entry_buf[4];
5606- int i, len = 0;
5607+ unsigned int i, len = 0;
5608 int key;
5609 unsigned int v;
5610
5611diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5612--- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5613+++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5614@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5615
5616 boot_params.screen_info.vesapm_seg = oreg.es;
5617 boot_params.screen_info.vesapm_off = oreg.di;
5618+ boot_params.screen_info.vesapm_size = oreg.cx;
5619 }
5620
5621 /*
5622diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5623--- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5624+++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5625@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5626 unsigned long dump_start, dump_size;
5627 struct user32 dump;
5628
5629+ memset(&dump, 0, sizeof(dump));
5630+
5631 fs = get_fs();
5632 set_fs(KERNEL_DS);
5633 has_dumped = 1;
5634diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5635--- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5636+++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5637@@ -13,6 +13,7 @@
5638 #include <asm/thread_info.h>
5639 #include <asm/segment.h>
5640 #include <asm/irqflags.h>
5641+#include <asm/pgtable.h>
5642 #include <linux/linkage.h>
5643
5644 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5645@@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5646 ENDPROC(native_irq_enable_sysexit)
5647 #endif
5648
5649+ .macro pax_enter_kernel_user
5650+#ifdef CONFIG_PAX_MEMORY_UDEREF
5651+ call pax_enter_kernel_user
5652+#endif
5653+ .endm
5654+
5655+ .macro pax_exit_kernel_user
5656+#ifdef CONFIG_PAX_MEMORY_UDEREF
5657+ call pax_exit_kernel_user
5658+#endif
5659+#ifdef CONFIG_PAX_RANDKSTACK
5660+ pushq %rax
5661+ call pax_randomize_kstack
5662+ popq %rax
5663+#endif
5664+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5665+ call pax_erase_kstack
5666+#endif
5667+ .endm
5668+
5669+ .macro pax_erase_kstack
5670+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5671+ call pax_erase_kstack
5672+#endif
5673+ .endm
5674+
5675 /*
5676 * 32bit SYSENTER instruction entry.
5677 *
5678@@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5679 CFI_REGISTER rsp,rbp
5680 SWAPGS_UNSAFE_STACK
5681 movq PER_CPU_VAR(kernel_stack), %rsp
5682- addq $(KERNEL_STACK_OFFSET),%rsp
5683+ pax_enter_kernel_user
5684 /*
5685 * No need to follow this irqs on/off section: the syscall
5686 * disabled irqs, here we enable it straight after entry:
5687@@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5688 CFI_REL_OFFSET rsp,0
5689 pushfq_cfi
5690 /*CFI_REL_OFFSET rflags,0*/
5691- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5692+ GET_THREAD_INFO(%r10)
5693+ movl TI_sysenter_return(%r10), %r10d
5694 CFI_REGISTER rip,r10
5695 pushq_cfi $__USER32_CS
5696 /*CFI_REL_OFFSET cs,0*/
5697@@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5698 SAVE_ARGS 0,0,1
5699 /* no need to do an access_ok check here because rbp has been
5700 32bit zero extended */
5701+
5702+#ifdef CONFIG_PAX_MEMORY_UDEREF
5703+ mov $PAX_USER_SHADOW_BASE,%r10
5704+ add %r10,%rbp
5705+#endif
5706+
5707 1: movl (%rbp),%ebp
5708 .section __ex_table,"a"
5709 .quad 1b,ia32_badarg
5710@@ -168,6 +202,7 @@ sysenter_dispatch:
5711 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5712 jnz sysexit_audit
5713 sysexit_from_sys_call:
5714+ pax_exit_kernel_user
5715 andl $~TS_COMPAT,TI_status(%r10)
5716 /* clear IF, that popfq doesn't enable interrupts early */
5717 andl $~0x200,EFLAGS-R11(%rsp)
5718@@ -194,6 +229,9 @@ sysexit_from_sys_call:
5719 movl %eax,%esi /* 2nd arg: syscall number */
5720 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5721 call audit_syscall_entry
5722+
5723+ pax_erase_kstack
5724+
5725 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5726 cmpq $(IA32_NR_syscalls-1),%rax
5727 ja ia32_badsys
5728@@ -246,6 +284,9 @@ sysenter_tracesys:
5729 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5730 movq %rsp,%rdi /* &pt_regs -> arg1 */
5731 call syscall_trace_enter
5732+
5733+ pax_erase_kstack
5734+
5735 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5736 RESTORE_REST
5737 cmpq $(IA32_NR_syscalls-1),%rax
5738@@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5739 ENTRY(ia32_cstar_target)
5740 CFI_STARTPROC32 simple
5741 CFI_SIGNAL_FRAME
5742- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5743+ CFI_DEF_CFA rsp,0
5744 CFI_REGISTER rip,rcx
5745 /*CFI_REGISTER rflags,r11*/
5746 SWAPGS_UNSAFE_STACK
5747 movl %esp,%r8d
5748 CFI_REGISTER rsp,r8
5749 movq PER_CPU_VAR(kernel_stack),%rsp
5750+
5751+#ifdef CONFIG_PAX_MEMORY_UDEREF
5752+ pax_enter_kernel_user
5753+#endif
5754+
5755 /*
5756 * No need to follow this irqs on/off section: the syscall
5757 * disabled irqs and here we enable it straight after entry:
5758 */
5759 ENABLE_INTERRUPTS(CLBR_NONE)
5760- SAVE_ARGS 8,1,1
5761+ SAVE_ARGS 8*6,1,1
5762 movl %eax,%eax /* zero extension */
5763 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5764 movq %rcx,RIP-ARGOFFSET(%rsp)
5765@@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5766 /* no need to do an access_ok check here because r8 has been
5767 32bit zero extended */
5768 /* hardware stack frame is complete now */
5769+
5770+#ifdef CONFIG_PAX_MEMORY_UDEREF
5771+ mov $PAX_USER_SHADOW_BASE,%r10
5772+ add %r10,%r8
5773+#endif
5774+
5775 1: movl (%r8),%r9d
5776 .section __ex_table,"a"
5777 .quad 1b,ia32_badarg
5778@@ -327,6 +379,7 @@ cstar_dispatch:
5779 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5780 jnz sysretl_audit
5781 sysretl_from_sys_call:
5782+ pax_exit_kernel_user
5783 andl $~TS_COMPAT,TI_status(%r10)
5784 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5785 movl RIP-ARGOFFSET(%rsp),%ecx
5786@@ -364,6 +417,9 @@ cstar_tracesys:
5787 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5788 movq %rsp,%rdi /* &pt_regs -> arg1 */
5789 call syscall_trace_enter
5790+
5791+ pax_erase_kstack
5792+
5793 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5794 RESTORE_REST
5795 xchgl %ebp,%r9d
5796@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5797 CFI_REL_OFFSET rip,RIP-RIP
5798 PARAVIRT_ADJUST_EXCEPTION_FRAME
5799 SWAPGS
5800+ pax_enter_kernel_user
5801 /*
5802 * No need to follow this irqs on/off section: the syscall
5803 * disabled irqs and here we enable it straight after entry:
5804@@ -441,6 +498,9 @@ ia32_tracesys:
5805 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5806 movq %rsp,%rdi /* &pt_regs -> arg1 */
5807 call syscall_trace_enter
5808+
5809+ pax_erase_kstack
5810+
5811 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5812 RESTORE_REST
5813 cmpq $(IA32_NR_syscalls-1),%rax
5814diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5815--- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5816+++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5817@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5818 sp -= frame_size;
5819 /* Align the stack pointer according to the i386 ABI,
5820 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5821- sp = ((sp + 4) & -16ul) - 4;
5822+ sp = ((sp - 12) & -16ul) - 4;
5823 return (void __user *) sp;
5824 }
5825
5826@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5827 * These are actually not used anymore, but left because some
5828 * gdb versions depend on them as a marker.
5829 */
5830- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5836 0xb8,
5837 __NR_ia32_rt_sigreturn,
5838 0x80cd,
5839- 0,
5840+ 0
5841 };
5842
5843 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5844@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5845
5846 if (ka->sa.sa_flags & SA_RESTORER)
5847 restorer = ka->sa.sa_restorer;
5848+ else if (current->mm->context.vdso)
5849+ /* Return stub is in 32bit vsyscall page */
5850+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5851 else
5852- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5853- rt_sigreturn);
5854+ restorer = &frame->retcode;
5855 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5856
5857 /*
5858 * Not actually used anymore, but left because some gdb
5859 * versions need it.
5860 */
5861- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5862+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5863 } put_user_catch(err);
5864
5865 if (err)
5866diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5867--- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5868+++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5869@@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5870 ".section .discard,\"aw\",@progbits\n" \
5871 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5872 ".previous\n" \
5873- ".section .altinstr_replacement, \"ax\"\n" \
5874+ ".section .altinstr_replacement, \"a\"\n" \
5875 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5876 ".previous"
5877
5878diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5879--- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5880+++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5881@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5882 __asm__ __volatile__(APM_DO_ZERO_SEGS
5883 "pushl %%edi\n\t"
5884 "pushl %%ebp\n\t"
5885- "lcall *%%cs:apm_bios_entry\n\t"
5886+ "lcall *%%ss:apm_bios_entry\n\t"
5887 "setc %%al\n\t"
5888 "popl %%ebp\n\t"
5889 "popl %%edi\n\t"
5890@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5891 __asm__ __volatile__(APM_DO_ZERO_SEGS
5892 "pushl %%edi\n\t"
5893 "pushl %%ebp\n\t"
5894- "lcall *%%cs:apm_bios_entry\n\t"
5895+ "lcall *%%ss:apm_bios_entry\n\t"
5896 "setc %%bl\n\t"
5897 "popl %%ebp\n\t"
5898 "popl %%edi\n\t"
5899diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5900--- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5901+++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5902@@ -12,6 +12,14 @@ typedef struct {
5903 u64 __aligned(8) counter;
5904 } atomic64_t;
5905
5906+#ifdef CONFIG_PAX_REFCOUNT
5907+typedef struct {
5908+ u64 __aligned(8) counter;
5909+} atomic64_unchecked_t;
5910+#else
5911+typedef atomic64_t atomic64_unchecked_t;
5912+#endif
5913+
5914 #define ATOMIC64_INIT(val) { (val) }
5915
5916 #ifdef CONFIG_X86_CMPXCHG64
5917@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5918 }
5919
5920 /**
5921+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5922+ * @p: pointer to type atomic64_unchecked_t
5923+ * @o: expected value
5924+ * @n: new value
5925+ *
5926+ * Atomically sets @v to @n if it was equal to @o and returns
5927+ * the old value.
5928+ */
5929+
5930+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5931+{
5932+ return cmpxchg64(&v->counter, o, n);
5933+}
5934+
5935+/**
5936 * atomic64_xchg - xchg atomic64 variable
5937 * @v: pointer to type atomic64_t
5938 * @n: value to assign
5939@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5940 }
5941
5942 /**
5943+ * atomic64_set_unchecked - set atomic64 variable
5944+ * @v: pointer to type atomic64_unchecked_t
5945+ * @n: value to assign
5946+ *
5947+ * Atomically sets the value of @v to @n.
5948+ */
5949+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5950+{
5951+ unsigned high = (unsigned)(i >> 32);
5952+ unsigned low = (unsigned)i;
5953+ asm volatile(ATOMIC64_ALTERNATIVE(set)
5954+ : "+b" (low), "+c" (high)
5955+ : "S" (v)
5956+ : "eax", "edx", "memory"
5957+ );
5958+}
5959+
5960+/**
5961 * atomic64_read - read atomic64 variable
5962 * @v: pointer to type atomic64_t
5963 *
5964@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5965 }
5966
5967 /**
5968+ * atomic64_read_unchecked - read atomic64 variable
5969+ * @v: pointer to type atomic64_unchecked_t
5970+ *
5971+ * Atomically reads the value of @v and returns it.
5972+ */
5973+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5974+{
5975+ long long r;
5976+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5977+ : "=A" (r), "+c" (v)
5978+ : : "memory"
5979+ );
5980+ return r;
5981+ }
5982+
5983+/**
5984 * atomic64_add_return - add and return
5985 * @i: integer value to add
5986 * @v: pointer to type atomic64_t
5987@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5988 return i;
5989 }
5990
5991+/**
5992+ * atomic64_add_return_unchecked - add and return
5993+ * @i: integer value to add
5994+ * @v: pointer to type atomic64_unchecked_t
5995+ *
5996+ * Atomically adds @i to @v and returns @i + *@v
5997+ */
5998+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5999+{
6000+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6001+ : "+A" (i), "+c" (v)
6002+ : : "memory"
6003+ );
6004+ return i;
6005+}
6006+
6007 /*
6008 * Other variants with different arithmetic operators:
6009 */
6010@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6011 return a;
6012 }
6013
6014+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6015+{
6016+ long long a;
6017+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6018+ : "=A" (a)
6019+ : "S" (v)
6020+ : "memory", "ecx"
6021+ );
6022+ return a;
6023+}
6024+
6025 static inline long long atomic64_dec_return(atomic64_t *v)
6026 {
6027 long long a;
6028@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6029 }
6030
6031 /**
6032+ * atomic64_add_unchecked - add integer to atomic64 variable
6033+ * @i: integer value to add
6034+ * @v: pointer to type atomic64_unchecked_t
6035+ *
6036+ * Atomically adds @i to @v.
6037+ */
6038+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6039+{
6040+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6041+ : "+A" (i), "+c" (v)
6042+ : : "memory"
6043+ );
6044+ return i;
6045+}
6046+
6047+/**
6048 * atomic64_sub - subtract the atomic64 variable
6049 * @i: integer value to subtract
6050 * @v: pointer to type atomic64_t
6051diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6052--- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6053+++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6054@@ -18,7 +18,19 @@
6055 */
6056 static inline long atomic64_read(const atomic64_t *v)
6057 {
6058- return (*(volatile long *)&(v)->counter);
6059+ return (*(volatile const long *)&(v)->counter);
6060+}
6061+
6062+/**
6063+ * atomic64_read_unchecked - read atomic64 variable
6064+ * @v: pointer of type atomic64_unchecked_t
6065+ *
6066+ * Atomically reads the value of @v.
6067+ * Doesn't imply a read memory barrier.
6068+ */
6069+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6070+{
6071+ return (*(volatile const long *)&(v)->counter);
6072 }
6073
6074 /**
6075@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6076 }
6077
6078 /**
6079+ * atomic64_set_unchecked - set atomic64 variable
6080+ * @v: pointer to type atomic64_unchecked_t
6081+ * @i: required value
6082+ *
6083+ * Atomically sets the value of @v to @i.
6084+ */
6085+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6086+{
6087+ v->counter = i;
6088+}
6089+
6090+/**
6091 * atomic64_add - add integer to atomic64 variable
6092 * @i: integer value to add
6093 * @v: pointer to type atomic64_t
6094@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6095 */
6096 static inline void atomic64_add(long i, atomic64_t *v)
6097 {
6098+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6099+
6100+#ifdef CONFIG_PAX_REFCOUNT
6101+ "jno 0f\n"
6102+ LOCK_PREFIX "subq %1,%0\n"
6103+ "int $4\n0:\n"
6104+ _ASM_EXTABLE(0b, 0b)
6105+#endif
6106+
6107+ : "=m" (v->counter)
6108+ : "er" (i), "m" (v->counter));
6109+}
6110+
6111+/**
6112+ * atomic64_add_unchecked - add integer to atomic64 variable
6113+ * @i: integer value to add
6114+ * @v: pointer to type atomic64_unchecked_t
6115+ *
6116+ * Atomically adds @i to @v.
6117+ */
6118+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6119+{
6120 asm volatile(LOCK_PREFIX "addq %1,%0"
6121 : "=m" (v->counter)
6122 : "er" (i), "m" (v->counter));
6123@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6124 */
6125 static inline void atomic64_sub(long i, atomic64_t *v)
6126 {
6127- asm volatile(LOCK_PREFIX "subq %1,%0"
6128+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6129+
6130+#ifdef CONFIG_PAX_REFCOUNT
6131+ "jno 0f\n"
6132+ LOCK_PREFIX "addq %1,%0\n"
6133+ "int $4\n0:\n"
6134+ _ASM_EXTABLE(0b, 0b)
6135+#endif
6136+
6137+ : "=m" (v->counter)
6138+ : "er" (i), "m" (v->counter));
6139+}
6140+
6141+/**
6142+ * atomic64_sub_unchecked - subtract the atomic64 variable
6143+ * @i: integer value to subtract
6144+ * @v: pointer to type atomic64_unchecked_t
6145+ *
6146+ * Atomically subtracts @i from @v.
6147+ */
6148+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6149+{
6150+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6151 : "=m" (v->counter)
6152 : "er" (i), "m" (v->counter));
6153 }
6154@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6155 {
6156 unsigned char c;
6157
6158- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6159+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6160+
6161+#ifdef CONFIG_PAX_REFCOUNT
6162+ "jno 0f\n"
6163+ LOCK_PREFIX "addq %2,%0\n"
6164+ "int $4\n0:\n"
6165+ _ASM_EXTABLE(0b, 0b)
6166+#endif
6167+
6168+ "sete %1\n"
6169 : "=m" (v->counter), "=qm" (c)
6170 : "er" (i), "m" (v->counter) : "memory");
6171 return c;
6172@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6173 */
6174 static inline void atomic64_inc(atomic64_t *v)
6175 {
6176+ asm volatile(LOCK_PREFIX "incq %0\n"
6177+
6178+#ifdef CONFIG_PAX_REFCOUNT
6179+ "jno 0f\n"
6180+ LOCK_PREFIX "decq %0\n"
6181+ "int $4\n0:\n"
6182+ _ASM_EXTABLE(0b, 0b)
6183+#endif
6184+
6185+ : "=m" (v->counter)
6186+ : "m" (v->counter));
6187+}
6188+
6189+/**
6190+ * atomic64_inc_unchecked - increment atomic64 variable
6191+ * @v: pointer to type atomic64_unchecked_t
6192+ *
6193+ * Atomically increments @v by 1.
6194+ */
6195+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6196+{
6197 asm volatile(LOCK_PREFIX "incq %0"
6198 : "=m" (v->counter)
6199 : "m" (v->counter));
6200@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6201 */
6202 static inline void atomic64_dec(atomic64_t *v)
6203 {
6204- asm volatile(LOCK_PREFIX "decq %0"
6205+ asm volatile(LOCK_PREFIX "decq %0\n"
6206+
6207+#ifdef CONFIG_PAX_REFCOUNT
6208+ "jno 0f\n"
6209+ LOCK_PREFIX "incq %0\n"
6210+ "int $4\n0:\n"
6211+ _ASM_EXTABLE(0b, 0b)
6212+#endif
6213+
6214+ : "=m" (v->counter)
6215+ : "m" (v->counter));
6216+}
6217+
6218+/**
6219+ * atomic64_dec_unchecked - decrement atomic64 variable
6220+ * @v: pointer to type atomic64_t
6221+ *
6222+ * Atomically decrements @v by 1.
6223+ */
6224+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6225+{
6226+ asm volatile(LOCK_PREFIX "decq %0\n"
6227 : "=m" (v->counter)
6228 : "m" (v->counter));
6229 }
6230@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6231 {
6232 unsigned char c;
6233
6234- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6235+ asm volatile(LOCK_PREFIX "decq %0\n"
6236+
6237+#ifdef CONFIG_PAX_REFCOUNT
6238+ "jno 0f\n"
6239+ LOCK_PREFIX "incq %0\n"
6240+ "int $4\n0:\n"
6241+ _ASM_EXTABLE(0b, 0b)
6242+#endif
6243+
6244+ "sete %1\n"
6245 : "=m" (v->counter), "=qm" (c)
6246 : "m" (v->counter) : "memory");
6247 return c != 0;
6248@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6249 {
6250 unsigned char c;
6251
6252- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6253+ asm volatile(LOCK_PREFIX "incq %0\n"
6254+
6255+#ifdef CONFIG_PAX_REFCOUNT
6256+ "jno 0f\n"
6257+ LOCK_PREFIX "decq %0\n"
6258+ "int $4\n0:\n"
6259+ _ASM_EXTABLE(0b, 0b)
6260+#endif
6261+
6262+ "sete %1\n"
6263 : "=m" (v->counter), "=qm" (c)
6264 : "m" (v->counter) : "memory");
6265 return c != 0;
6266@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6267 {
6268 unsigned char c;
6269
6270- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6271+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6272+
6273+#ifdef CONFIG_PAX_REFCOUNT
6274+ "jno 0f\n"
6275+ LOCK_PREFIX "subq %2,%0\n"
6276+ "int $4\n0:\n"
6277+ _ASM_EXTABLE(0b, 0b)
6278+#endif
6279+
6280+ "sets %1\n"
6281 : "=m" (v->counter), "=qm" (c)
6282 : "er" (i), "m" (v->counter) : "memory");
6283 return c;
6284@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6285 static inline long atomic64_add_return(long i, atomic64_t *v)
6286 {
6287 long __i = i;
6288- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6289+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6290+
6291+#ifdef CONFIG_PAX_REFCOUNT
6292+ "jno 0f\n"
6293+ "movq %0, %1\n"
6294+ "int $4\n0:\n"
6295+ _ASM_EXTABLE(0b, 0b)
6296+#endif
6297+
6298+ : "+r" (i), "+m" (v->counter)
6299+ : : "memory");
6300+ return i + __i;
6301+}
6302+
6303+/**
6304+ * atomic64_add_return_unchecked - add and return
6305+ * @i: integer value to add
6306+ * @v: pointer to type atomic64_unchecked_t
6307+ *
6308+ * Atomically adds @i to @v and returns @i + @v
6309+ */
6310+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6311+{
6312+ long __i = i;
6313+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6314 : "+r" (i), "+m" (v->counter)
6315 : : "memory");
6316 return i + __i;
6317@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6318 }
6319
6320 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6321+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6322+{
6323+ return atomic64_add_return_unchecked(1, v);
6324+}
6325 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6326
6327 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6328@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6329 return cmpxchg(&v->counter, old, new);
6330 }
6331
6332+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6333+{
6334+ return cmpxchg(&v->counter, old, new);
6335+}
6336+
6337 static inline long atomic64_xchg(atomic64_t *v, long new)
6338 {
6339 return xchg(&v->counter, new);
6340@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6341 */
6342 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6343 {
6344- long c, old;
6345+ long c, old, new;
6346 c = atomic64_read(v);
6347 for (;;) {
6348- if (unlikely(c == (u)))
6349+ if (unlikely(c == u))
6350 break;
6351- old = atomic64_cmpxchg((v), c, c + (a));
6352+
6353+ asm volatile("add %2,%0\n"
6354+
6355+#ifdef CONFIG_PAX_REFCOUNT
6356+ "jno 0f\n"
6357+ "sub %2,%0\n"
6358+ "int $4\n0:\n"
6359+ _ASM_EXTABLE(0b, 0b)
6360+#endif
6361+
6362+ : "=r" (new)
6363+ : "0" (c), "ir" (a));
6364+
6365+ old = atomic64_cmpxchg(v, c, new);
6366 if (likely(old == c))
6367 break;
6368 c = old;
6369 }
6370- return c != (u);
6371+ return c != u;
6372 }
6373
6374 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6375diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6376--- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6377+++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6378@@ -22,7 +22,18 @@
6379 */
6380 static inline int atomic_read(const atomic_t *v)
6381 {
6382- return (*(volatile int *)&(v)->counter);
6383+ return (*(volatile const int *)&(v)->counter);
6384+}
6385+
6386+/**
6387+ * atomic_read_unchecked - read atomic variable
6388+ * @v: pointer of type atomic_unchecked_t
6389+ *
6390+ * Atomically reads the value of @v.
6391+ */
6392+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6393+{
6394+ return (*(volatile const int *)&(v)->counter);
6395 }
6396
6397 /**
6398@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6399 }
6400
6401 /**
6402+ * atomic_set_unchecked - set atomic variable
6403+ * @v: pointer of type atomic_unchecked_t
6404+ * @i: required value
6405+ *
6406+ * Atomically sets the value of @v to @i.
6407+ */
6408+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6409+{
6410+ v->counter = i;
6411+}
6412+
6413+/**
6414 * atomic_add - add integer to atomic variable
6415 * @i: integer value to add
6416 * @v: pointer of type atomic_t
6417@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6418 */
6419 static inline void atomic_add(int i, atomic_t *v)
6420 {
6421- asm volatile(LOCK_PREFIX "addl %1,%0"
6422+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6423+
6424+#ifdef CONFIG_PAX_REFCOUNT
6425+ "jno 0f\n"
6426+ LOCK_PREFIX "subl %1,%0\n"
6427+ "int $4\n0:\n"
6428+ _ASM_EXTABLE(0b, 0b)
6429+#endif
6430+
6431+ : "+m" (v->counter)
6432+ : "ir" (i));
6433+}
6434+
6435+/**
6436+ * atomic_add_unchecked - add integer to atomic variable
6437+ * @i: integer value to add
6438+ * @v: pointer of type atomic_unchecked_t
6439+ *
6440+ * Atomically adds @i to @v.
6441+ */
6442+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6443+{
6444+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6445 : "+m" (v->counter)
6446 : "ir" (i));
6447 }
6448@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6449 */
6450 static inline void atomic_sub(int i, atomic_t *v)
6451 {
6452- asm volatile(LOCK_PREFIX "subl %1,%0"
6453+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6454+
6455+#ifdef CONFIG_PAX_REFCOUNT
6456+ "jno 0f\n"
6457+ LOCK_PREFIX "addl %1,%0\n"
6458+ "int $4\n0:\n"
6459+ _ASM_EXTABLE(0b, 0b)
6460+#endif
6461+
6462+ : "+m" (v->counter)
6463+ : "ir" (i));
6464+}
6465+
6466+/**
6467+ * atomic_sub_unchecked - subtract integer from atomic variable
6468+ * @i: integer value to subtract
6469+ * @v: pointer of type atomic_unchecked_t
6470+ *
6471+ * Atomically subtracts @i from @v.
6472+ */
6473+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6474+{
6475+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6476 : "+m" (v->counter)
6477 : "ir" (i));
6478 }
6479@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6480 {
6481 unsigned char c;
6482
6483- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6484+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ "jno 0f\n"
6488+ LOCK_PREFIX "addl %2,%0\n"
6489+ "int $4\n0:\n"
6490+ _ASM_EXTABLE(0b, 0b)
6491+#endif
6492+
6493+ "sete %1\n"
6494 : "+m" (v->counter), "=qm" (c)
6495 : "ir" (i) : "memory");
6496 return c;
6497@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6498 */
6499 static inline void atomic_inc(atomic_t *v)
6500 {
6501- asm volatile(LOCK_PREFIX "incl %0"
6502+ asm volatile(LOCK_PREFIX "incl %0\n"
6503+
6504+#ifdef CONFIG_PAX_REFCOUNT
6505+ "jno 0f\n"
6506+ LOCK_PREFIX "decl %0\n"
6507+ "int $4\n0:\n"
6508+ _ASM_EXTABLE(0b, 0b)
6509+#endif
6510+
6511+ : "+m" (v->counter));
6512+}
6513+
6514+/**
6515+ * atomic_inc_unchecked - increment atomic variable
6516+ * @v: pointer of type atomic_unchecked_t
6517+ *
6518+ * Atomically increments @v by 1.
6519+ */
6520+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6521+{
6522+ asm volatile(LOCK_PREFIX "incl %0\n"
6523 : "+m" (v->counter));
6524 }
6525
6526@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6527 */
6528 static inline void atomic_dec(atomic_t *v)
6529 {
6530- asm volatile(LOCK_PREFIX "decl %0"
6531+ asm volatile(LOCK_PREFIX "decl %0\n"
6532+
6533+#ifdef CONFIG_PAX_REFCOUNT
6534+ "jno 0f\n"
6535+ LOCK_PREFIX "incl %0\n"
6536+ "int $4\n0:\n"
6537+ _ASM_EXTABLE(0b, 0b)
6538+#endif
6539+
6540+ : "+m" (v->counter));
6541+}
6542+
6543+/**
6544+ * atomic_dec_unchecked - decrement atomic variable
6545+ * @v: pointer of type atomic_unchecked_t
6546+ *
6547+ * Atomically decrements @v by 1.
6548+ */
6549+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6550+{
6551+ asm volatile(LOCK_PREFIX "decl %0\n"
6552 : "+m" (v->counter));
6553 }
6554
6555@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6556 {
6557 unsigned char c;
6558
6559- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6560+ asm volatile(LOCK_PREFIX "decl %0\n"
6561+
6562+#ifdef CONFIG_PAX_REFCOUNT
6563+ "jno 0f\n"
6564+ LOCK_PREFIX "incl %0\n"
6565+ "int $4\n0:\n"
6566+ _ASM_EXTABLE(0b, 0b)
6567+#endif
6568+
6569+ "sete %1\n"
6570 : "+m" (v->counter), "=qm" (c)
6571 : : "memory");
6572 return c != 0;
6573@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6574 {
6575 unsigned char c;
6576
6577- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6578+ asm volatile(LOCK_PREFIX "incl %0\n"
6579+
6580+#ifdef CONFIG_PAX_REFCOUNT
6581+ "jno 0f\n"
6582+ LOCK_PREFIX "decl %0\n"
6583+ "int $4\n0:\n"
6584+ _ASM_EXTABLE(0b, 0b)
6585+#endif
6586+
6587+ "sete %1\n"
6588+ : "+m" (v->counter), "=qm" (c)
6589+ : : "memory");
6590+ return c != 0;
6591+}
6592+
6593+/**
6594+ * atomic_inc_and_test_unchecked - increment and test
6595+ * @v: pointer of type atomic_unchecked_t
6596+ *
6597+ * Atomically increments @v by 1
6598+ * and returns true if the result is zero, or false for all
6599+ * other cases.
6600+ */
6601+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6602+{
6603+ unsigned char c;
6604+
6605+ asm volatile(LOCK_PREFIX "incl %0\n"
6606+ "sete %1\n"
6607 : "+m" (v->counter), "=qm" (c)
6608 : : "memory");
6609 return c != 0;
6610@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6611 {
6612 unsigned char c;
6613
6614- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6615+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6616+
6617+#ifdef CONFIG_PAX_REFCOUNT
6618+ "jno 0f\n"
6619+ LOCK_PREFIX "subl %2,%0\n"
6620+ "int $4\n0:\n"
6621+ _ASM_EXTABLE(0b, 0b)
6622+#endif
6623+
6624+ "sets %1\n"
6625 : "+m" (v->counter), "=qm" (c)
6626 : "ir" (i) : "memory");
6627 return c;
6628@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6629 #endif
6630 /* Modern 486+ processor */
6631 __i = i;
6632+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6633+
6634+#ifdef CONFIG_PAX_REFCOUNT
6635+ "jno 0f\n"
6636+ "movl %0, %1\n"
6637+ "int $4\n0:\n"
6638+ _ASM_EXTABLE(0b, 0b)
6639+#endif
6640+
6641+ : "+r" (i), "+m" (v->counter)
6642+ : : "memory");
6643+ return i + __i;
6644+
6645+#ifdef CONFIG_M386
6646+no_xadd: /* Legacy 386 processor */
6647+ local_irq_save(flags);
6648+ __i = atomic_read(v);
6649+ atomic_set(v, i + __i);
6650+ local_irq_restore(flags);
6651+ return i + __i;
6652+#endif
6653+}
6654+
6655+/**
6656+ * atomic_add_return_unchecked - add integer and return
6657+ * @v: pointer of type atomic_unchecked_t
6658+ * @i: integer value to add
6659+ *
6660+ * Atomically adds @i to @v and returns @i + @v
6661+ */
6662+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6663+{
6664+ int __i;
6665+#ifdef CONFIG_M386
6666+ unsigned long flags;
6667+ if (unlikely(boot_cpu_data.x86 <= 3))
6668+ goto no_xadd;
6669+#endif
6670+ /* Modern 486+ processor */
6671+ __i = i;
6672 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6673 : "+r" (i), "+m" (v->counter)
6674 : : "memory");
6675@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6676 }
6677
6678 #define atomic_inc_return(v) (atomic_add_return(1, v))
6679+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6680+{
6681+ return atomic_add_return_unchecked(1, v);
6682+}
6683 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6684
6685 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6686@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6687 return cmpxchg(&v->counter, old, new);
6688 }
6689
6690+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6691+{
6692+ return cmpxchg(&v->counter, old, new);
6693+}
6694+
6695 static inline int atomic_xchg(atomic_t *v, int new)
6696 {
6697 return xchg(&v->counter, new);
6698 }
6699
6700+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6701+{
6702+ return xchg(&v->counter, new);
6703+}
6704+
6705 /**
6706 * atomic_add_unless - add unless the number is already a given value
6707 * @v: pointer of type atomic_t
6708@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6709 */
6710 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6711 {
6712- int c, old;
6713+ int c, old, new;
6714 c = atomic_read(v);
6715 for (;;) {
6716- if (unlikely(c == (u)))
6717+ if (unlikely(c == u))
6718 break;
6719- old = atomic_cmpxchg((v), c, c + (a));
6720+
6721+ asm volatile("addl %2,%0\n"
6722+
6723+#ifdef CONFIG_PAX_REFCOUNT
6724+ "jno 0f\n"
6725+ "subl %2,%0\n"
6726+ "int $4\n0:\n"
6727+ _ASM_EXTABLE(0b, 0b)
6728+#endif
6729+
6730+ : "=r" (new)
6731+ : "0" (c), "ir" (a));
6732+
6733+ old = atomic_cmpxchg(v, c, new);
6734 if (likely(old == c))
6735 break;
6736 c = old;
6737 }
6738- return c != (u);
6739+ return c != u;
6740 }
6741
6742 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6743
6744+/**
6745+ * atomic_inc_not_zero_hint - increment if not null
6746+ * @v: pointer of type atomic_t
6747+ * @hint: probable value of the atomic before the increment
6748+ *
6749+ * This version of atomic_inc_not_zero() gives a hint of probable
6750+ * value of the atomic. This helps processor to not read the memory
6751+ * before doing the atomic read/modify/write cycle, lowering
6752+ * number of bus transactions on some arches.
6753+ *
6754+ * Returns: 0 if increment was not done, 1 otherwise.
6755+ */
6756+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6757+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6758+{
6759+ int val, c = hint, new;
6760+
6761+ /* sanity test, should be removed by compiler if hint is a constant */
6762+ if (!hint)
6763+ return atomic_inc_not_zero(v);
6764+
6765+ do {
6766+ asm volatile("incl %0\n"
6767+
6768+#ifdef CONFIG_PAX_REFCOUNT
6769+ "jno 0f\n"
6770+ "decl %0\n"
6771+ "int $4\n0:\n"
6772+ _ASM_EXTABLE(0b, 0b)
6773+#endif
6774+
6775+ : "=r" (new)
6776+ : "0" (c));
6777+
6778+ val = atomic_cmpxchg(v, c, new);
6779+ if (val == c)
6780+ return 1;
6781+ c = val;
6782+ } while (c);
6783+
6784+ return 0;
6785+}
6786+
6787 /*
6788 * atomic_dec_if_positive - decrement by 1 if old value positive
6789 * @v: pointer of type atomic_t
6790diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6791--- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6792+++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6793@@ -38,7 +38,7 @@
6794 * a mask operation on a byte.
6795 */
6796 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6797-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6798+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6799 #define CONST_MASK(nr) (1 << ((nr) & 7))
6800
6801 /**
6802diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6803--- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6804+++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6805@@ -11,10 +11,15 @@
6806 #include <asm/pgtable_types.h>
6807
6808 /* Physical address where kernel should be loaded. */
6809-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6810+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6811 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6812 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6813
6814+#ifndef __ASSEMBLY__
6815+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6816+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6817+#endif
6818+
6819 /* Minimum kernel alignment, as a power of two */
6820 #ifdef CONFIG_X86_64
6821 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6822diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6823--- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6824+++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6825@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6826 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6827
6828 if (pg_flags == _PGMT_DEFAULT)
6829- return -1;
6830+ return ~0UL;
6831 else if (pg_flags == _PGMT_WC)
6832 return _PAGE_CACHE_WC;
6833 else if (pg_flags == _PGMT_UC_MINUS)
6834diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6835--- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6836+++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6837@@ -5,12 +5,13 @@
6838
6839 /* L1 cache line size */
6840 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6841-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6842+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6843
6844 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6845+#define __read_only __attribute__((__section__(".data..read_only")))
6846
6847 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6848-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6849+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6850
6851 #ifdef CONFIG_X86_VSMP
6852 #ifdef CONFIG_SMP
6853diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6854--- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6855+++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6856@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6857 int len, __wsum sum,
6858 int *src_err_ptr, int *dst_err_ptr);
6859
6860+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6861+ int len, __wsum sum,
6862+ int *src_err_ptr, int *dst_err_ptr);
6863+
6864+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6865+ int len, __wsum sum,
6866+ int *src_err_ptr, int *dst_err_ptr);
6867+
6868 /*
6869 * Note: when you get a NULL pointer exception here this means someone
6870 * passed in an incorrect kernel address to one of these functions.
6871@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6872 int *err_ptr)
6873 {
6874 might_sleep();
6875- return csum_partial_copy_generic((__force void *)src, dst,
6876+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6877 len, sum, err_ptr, NULL);
6878 }
6879
6880@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6881 {
6882 might_sleep();
6883 if (access_ok(VERIFY_WRITE, dst, len))
6884- return csum_partial_copy_generic(src, (__force void *)dst,
6885+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6886 len, sum, NULL, err_ptr);
6887
6888 if (len)
6889diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6890--- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6891+++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6892@@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6893 ".section .discard,\"aw\",@progbits\n"
6894 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6895 ".previous\n"
6896- ".section .altinstr_replacement,\"ax\"\n"
6897+ ".section .altinstr_replacement,\"a\"\n"
6898 "3: movb $1,%0\n"
6899 "4:\n"
6900 ".previous\n"
6901diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6902--- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6903+++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6904@@ -31,6 +31,12 @@ struct desc_struct {
6905 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6906 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6907 };
6908+ struct {
6909+ u16 offset_low;
6910+ u16 seg;
6911+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6912+ unsigned offset_high: 16;
6913+ } gate;
6914 };
6915 } __attribute__((packed));
6916
6917diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6918--- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6919+++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6920@@ -4,6 +4,7 @@
6921 #include <asm/desc_defs.h>
6922 #include <asm/ldt.h>
6923 #include <asm/mmu.h>
6924+#include <asm/pgtable.h>
6925 #include <linux/smp.h>
6926
6927 static inline void fill_ldt(struct desc_struct *desc,
6928@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6929 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6930 desc->type = (info->read_exec_only ^ 1) << 1;
6931 desc->type |= info->contents << 2;
6932+ desc->type |= info->seg_not_present ^ 1;
6933 desc->s = 1;
6934 desc->dpl = 0x3;
6935 desc->p = info->seg_not_present ^ 1;
6936@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6937 }
6938
6939 extern struct desc_ptr idt_descr;
6940-extern gate_desc idt_table[];
6941-
6942-struct gdt_page {
6943- struct desc_struct gdt[GDT_ENTRIES];
6944-} __attribute__((aligned(PAGE_SIZE)));
6945-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6946+extern gate_desc idt_table[256];
6947
6948+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6949 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6950 {
6951- return per_cpu(gdt_page, cpu).gdt;
6952+ return cpu_gdt_table[cpu];
6953 }
6954
6955 #ifdef CONFIG_X86_64
6956@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6957 unsigned long base, unsigned dpl, unsigned flags,
6958 unsigned short seg)
6959 {
6960- gate->a = (seg << 16) | (base & 0xffff);
6961- gate->b = (base & 0xffff0000) |
6962- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6963+ gate->gate.offset_low = base;
6964+ gate->gate.seg = seg;
6965+ gate->gate.reserved = 0;
6966+ gate->gate.type = type;
6967+ gate->gate.s = 0;
6968+ gate->gate.dpl = dpl;
6969+ gate->gate.p = 1;
6970+ gate->gate.offset_high = base >> 16;
6971 }
6972
6973 #endif
6974@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
6975 static inline void native_write_idt_entry(gate_desc *idt, int entry,
6976 const gate_desc *gate)
6977 {
6978+ pax_open_kernel();
6979 memcpy(&idt[entry], gate, sizeof(*gate));
6980+ pax_close_kernel();
6981 }
6982
6983 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
6984 const void *desc)
6985 {
6986+ pax_open_kernel();
6987 memcpy(&ldt[entry], desc, 8);
6988+ pax_close_kernel();
6989 }
6990
6991 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
6992@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
6993 size = sizeof(struct desc_struct);
6994 break;
6995 }
6996+
6997+ pax_open_kernel();
6998 memcpy(&gdt[entry], desc, size);
6999+ pax_close_kernel();
7000 }
7001
7002 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7003@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7004
7005 static inline void native_load_tr_desc(void)
7006 {
7007+ pax_open_kernel();
7008 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7009+ pax_close_kernel();
7010 }
7011
7012 static inline void native_load_gdt(const struct desc_ptr *dtr)
7013@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7014 unsigned int i;
7015 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7016
7017+ pax_open_kernel();
7018 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7019 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7020+ pax_close_kernel();
7021 }
7022
7023 #define _LDT_empty(info) \
7024@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7025 desc->limit = (limit >> 16) & 0xf;
7026 }
7027
7028-static inline void _set_gate(int gate, unsigned type, void *addr,
7029+static inline void _set_gate(int gate, unsigned type, const void *addr,
7030 unsigned dpl, unsigned ist, unsigned seg)
7031 {
7032 gate_desc s;
7033@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7034 * Pentium F0 0F bugfix can have resulted in the mapped
7035 * IDT being write-protected.
7036 */
7037-static inline void set_intr_gate(unsigned int n, void *addr)
7038+static inline void set_intr_gate(unsigned int n, const void *addr)
7039 {
7040 BUG_ON((unsigned)n > 0xFF);
7041 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7042@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7043 /*
7044 * This routine sets up an interrupt gate at directory privilege level 3.
7045 */
7046-static inline void set_system_intr_gate(unsigned int n, void *addr)
7047+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7048 {
7049 BUG_ON((unsigned)n > 0xFF);
7050 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7051 }
7052
7053-static inline void set_system_trap_gate(unsigned int n, void *addr)
7054+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7055 {
7056 BUG_ON((unsigned)n > 0xFF);
7057 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7058 }
7059
7060-static inline void set_trap_gate(unsigned int n, void *addr)
7061+static inline void set_trap_gate(unsigned int n, const void *addr)
7062 {
7063 BUG_ON((unsigned)n > 0xFF);
7064 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7065@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7066 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7067 {
7068 BUG_ON((unsigned)n > 0xFF);
7069- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7070+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7071 }
7072
7073-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7074+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7075 {
7076 BUG_ON((unsigned)n > 0xFF);
7077 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7078 }
7079
7080-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7081+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7082 {
7083 BUG_ON((unsigned)n > 0xFF);
7084 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7085 }
7086
7087+#ifdef CONFIG_X86_32
7088+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7089+{
7090+ struct desc_struct d;
7091+
7092+ if (likely(limit))
7093+ limit = (limit - 1UL) >> PAGE_SHIFT;
7094+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7095+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7096+}
7097+#endif
7098+
7099 #endif /* _ASM_X86_DESC_H */
7100diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7101--- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7102+++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7103@@ -69,7 +69,7 @@ struct e820map {
7104 #define ISA_START_ADDRESS 0xa0000
7105 #define ISA_END_ADDRESS 0x100000
7106
7107-#define BIOS_BEGIN 0x000a0000
7108+#define BIOS_BEGIN 0x000c0000
7109 #define BIOS_END 0x00100000
7110
7111 #define BIOS_ROM_BASE 0xffe00000
7112diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7113--- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7114+++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7115@@ -237,7 +237,25 @@ extern int force_personality32;
7116 the loader. We need to make sure that it is out of the way of the program
7117 that it will "exec", and that there is sufficient room for the brk. */
7118
7119+#ifdef CONFIG_PAX_SEGMEXEC
7120+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7121+#else
7122 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7123+#endif
7124+
7125+#ifdef CONFIG_PAX_ASLR
7126+#ifdef CONFIG_X86_32
7127+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7128+
7129+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7130+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7131+#else
7132+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7133+
7134+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7135+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7136+#endif
7137+#endif
7138
7139 /* This yields a mask that user programs can use to figure out what
7140 instruction set this CPU supports. This could be done in user space,
7141@@ -291,8 +309,7 @@ do { \
7142 #define ARCH_DLINFO \
7143 do { \
7144 if (vdso_enabled) \
7145- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7146- (unsigned long)current->mm->context.vdso); \
7147+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7148 } while (0)
7149
7150 #define AT_SYSINFO 32
7151@@ -303,7 +320,7 @@ do { \
7152
7153 #endif /* !CONFIG_X86_32 */
7154
7155-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7156+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7157
7158 #define VDSO_ENTRY \
7159 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7160@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7161 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7162 #define compat_arch_setup_additional_pages syscall32_setup_pages
7163
7164-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7165-#define arch_randomize_brk arch_randomize_brk
7166-
7167 #endif /* _ASM_X86_ELF_H */
7168diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7169--- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7170+++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7171@@ -15,6 +15,6 @@ enum reboot_type {
7172
7173 extern enum reboot_type reboot_type;
7174
7175-extern void machine_emergency_restart(void);
7176+extern void machine_emergency_restart(void) __noreturn;
7177
7178 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7179diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7180--- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7181+++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7182@@ -12,16 +12,18 @@
7183 #include <asm/system.h>
7184
7185 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7186+ typecheck(u32 *, uaddr); \
7187 asm volatile("1:\t" insn "\n" \
7188 "2:\t.section .fixup,\"ax\"\n" \
7189 "3:\tmov\t%3, %1\n" \
7190 "\tjmp\t2b\n" \
7191 "\t.previous\n" \
7192 _ASM_EXTABLE(1b, 3b) \
7193- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7194+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7195 : "i" (-EFAULT), "0" (oparg), "1" (0))
7196
7197 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7198+ typecheck(u32 *, uaddr); \
7199 asm volatile("1:\tmovl %2, %0\n" \
7200 "\tmovl\t%0, %3\n" \
7201 "\t" insn "\n" \
7202@@ -34,7 +36,7 @@
7203 _ASM_EXTABLE(1b, 4b) \
7204 _ASM_EXTABLE(2b, 4b) \
7205 : "=&a" (oldval), "=&r" (ret), \
7206- "+m" (*uaddr), "=&r" (tem) \
7207+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7208 : "r" (oparg), "i" (-EFAULT), "1" (0))
7209
7210 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7211@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7212
7213 switch (op) {
7214 case FUTEX_OP_SET:
7215- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7216+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7217 break;
7218 case FUTEX_OP_ADD:
7219- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7220+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7221 uaddr, oparg);
7222 break;
7223 case FUTEX_OP_OR:
7224@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7225 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7226 return -EFAULT;
7227
7228- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7229+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7230 "2:\t.section .fixup, \"ax\"\n"
7231 "3:\tmov %3, %0\n"
7232 "\tjmp 2b\n"
7233 "\t.previous\n"
7234 _ASM_EXTABLE(1b, 3b)
7235- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7236+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7237 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7238 : "memory"
7239 );
7240diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7241--- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7242+++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7243@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7244 extern void enable_IO_APIC(void);
7245
7246 /* Statistics */
7247-extern atomic_t irq_err_count;
7248-extern atomic_t irq_mis_count;
7249+extern atomic_unchecked_t irq_err_count;
7250+extern atomic_unchecked_t irq_mis_count;
7251
7252 /* EISA */
7253 extern void eisa_set_level_irq(unsigned int irq);
7254diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7255--- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7256+++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7257@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7258 {
7259 int err;
7260
7261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7262+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7263+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7264+#endif
7265+
7266 /* See comment in fxsave() below. */
7267 #ifdef CONFIG_AS_FXSAVEQ
7268 asm volatile("1: fxrstorq %[fx]\n\t"
7269@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7270 {
7271 int err;
7272
7273+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7274+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7275+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7276+#endif
7277+
7278 /*
7279 * Clear the bytes not touched by the fxsave and reserved
7280 * for the SW usage.
7281@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7282 #endif /* CONFIG_X86_64 */
7283
7284 /* We need a safe address that is cheap to find and that is already
7285- in L1 during context switch. The best choices are unfortunately
7286- different for UP and SMP */
7287-#ifdef CONFIG_SMP
7288-#define safe_address (__per_cpu_offset[0])
7289-#else
7290-#define safe_address (kstat_cpu(0).cpustat.user)
7291-#endif
7292+ in L1 during context switch. */
7293+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7294
7295 /*
7296 * These must be called with preempt disabled
7297@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7298 struct thread_info *me = current_thread_info();
7299 preempt_disable();
7300 if (me->status & TS_USEDFPU)
7301- __save_init_fpu(me->task);
7302+ __save_init_fpu(current);
7303 else
7304 clts();
7305 }
7306diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7307--- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7308+++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7309@@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7310
7311 #include <linux/vmalloc.h>
7312
7313+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7314+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7315+{
7316+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7317+}
7318+
7319+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7320+{
7321+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7322+}
7323+
7324 /*
7325 * Convert a virtual cached pointer to an uncached pointer
7326 */
7327diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7328--- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7329+++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7330@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7331 sti; \
7332 sysexit
7333
7334+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7335+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7336+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7337+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7338+
7339 #else
7340 #define INTERRUPT_RETURN iret
7341 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7342diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7343--- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7344+++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7345@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7346 #define RELATIVEJUMP_SIZE 5
7347 #define RELATIVECALL_OPCODE 0xe8
7348 #define RELATIVE_ADDR_SIZE 4
7349-#define MAX_STACK_SIZE 64
7350-#define MIN_STACK_SIZE(ADDR) \
7351- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7352- THREAD_SIZE - (unsigned long)(ADDR))) \
7353- ? (MAX_STACK_SIZE) \
7354- : (((unsigned long)current_thread_info()) + \
7355- THREAD_SIZE - (unsigned long)(ADDR)))
7356+#define MAX_STACK_SIZE 64UL
7357+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7358
7359 #define flush_insn_slot(p) do { } while (0)
7360
7361diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7362--- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7363+++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7364@@ -419,7 +419,7 @@ struct kvm_arch {
7365 unsigned int n_used_mmu_pages;
7366 unsigned int n_requested_mmu_pages;
7367 unsigned int n_max_mmu_pages;
7368- atomic_t invlpg_counter;
7369+ atomic_unchecked_t invlpg_counter;
7370 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7371 /*
7372 * Hash table of struct kvm_mmu_page.
7373@@ -589,7 +589,7 @@ struct kvm_x86_ops {
7374 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7375
7376 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7377- const struct trace_print_flags *exit_reasons_str;
7378+ const struct trace_print_flags * const exit_reasons_str;
7379 };
7380
7381 struct kvm_arch_async_pf {
7382diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7383--- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7384+++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7385@@ -18,26 +18,58 @@ typedef struct {
7386
7387 static inline void local_inc(local_t *l)
7388 {
7389- asm volatile(_ASM_INC "%0"
7390+ asm volatile(_ASM_INC "%0\n"
7391+
7392+#ifdef CONFIG_PAX_REFCOUNT
7393+ "jno 0f\n"
7394+ _ASM_DEC "%0\n"
7395+ "int $4\n0:\n"
7396+ _ASM_EXTABLE(0b, 0b)
7397+#endif
7398+
7399 : "+m" (l->a.counter));
7400 }
7401
7402 static inline void local_dec(local_t *l)
7403 {
7404- asm volatile(_ASM_DEC "%0"
7405+ asm volatile(_ASM_DEC "%0\n"
7406+
7407+#ifdef CONFIG_PAX_REFCOUNT
7408+ "jno 0f\n"
7409+ _ASM_INC "%0\n"
7410+ "int $4\n0:\n"
7411+ _ASM_EXTABLE(0b, 0b)
7412+#endif
7413+
7414 : "+m" (l->a.counter));
7415 }
7416
7417 static inline void local_add(long i, local_t *l)
7418 {
7419- asm volatile(_ASM_ADD "%1,%0"
7420+ asm volatile(_ASM_ADD "%1,%0\n"
7421+
7422+#ifdef CONFIG_PAX_REFCOUNT
7423+ "jno 0f\n"
7424+ _ASM_SUB "%1,%0\n"
7425+ "int $4\n0:\n"
7426+ _ASM_EXTABLE(0b, 0b)
7427+#endif
7428+
7429 : "+m" (l->a.counter)
7430 : "ir" (i));
7431 }
7432
7433 static inline void local_sub(long i, local_t *l)
7434 {
7435- asm volatile(_ASM_SUB "%1,%0"
7436+ asm volatile(_ASM_SUB "%1,%0\n"
7437+
7438+#ifdef CONFIG_PAX_REFCOUNT
7439+ "jno 0f\n"
7440+ _ASM_ADD "%1,%0\n"
7441+ "int $4\n0:\n"
7442+ _ASM_EXTABLE(0b, 0b)
7443+#endif
7444+
7445 : "+m" (l->a.counter)
7446 : "ir" (i));
7447 }
7448@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7449 {
7450 unsigned char c;
7451
7452- asm volatile(_ASM_SUB "%2,%0; sete %1"
7453+ asm volatile(_ASM_SUB "%2,%0\n"
7454+
7455+#ifdef CONFIG_PAX_REFCOUNT
7456+ "jno 0f\n"
7457+ _ASM_ADD "%2,%0\n"
7458+ "int $4\n0:\n"
7459+ _ASM_EXTABLE(0b, 0b)
7460+#endif
7461+
7462+ "sete %1\n"
7463 : "+m" (l->a.counter), "=qm" (c)
7464 : "ir" (i) : "memory");
7465 return c;
7466@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7467 {
7468 unsigned char c;
7469
7470- asm volatile(_ASM_DEC "%0; sete %1"
7471+ asm volatile(_ASM_DEC "%0\n"
7472+
7473+#ifdef CONFIG_PAX_REFCOUNT
7474+ "jno 0f\n"
7475+ _ASM_INC "%0\n"
7476+ "int $4\n0:\n"
7477+ _ASM_EXTABLE(0b, 0b)
7478+#endif
7479+
7480+ "sete %1\n"
7481 : "+m" (l->a.counter), "=qm" (c)
7482 : : "memory");
7483 return c != 0;
7484@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7485 {
7486 unsigned char c;
7487
7488- asm volatile(_ASM_INC "%0; sete %1"
7489+ asm volatile(_ASM_INC "%0\n"
7490+
7491+#ifdef CONFIG_PAX_REFCOUNT
7492+ "jno 0f\n"
7493+ _ASM_DEC "%0\n"
7494+ "int $4\n0:\n"
7495+ _ASM_EXTABLE(0b, 0b)
7496+#endif
7497+
7498+ "sete %1\n"
7499 : "+m" (l->a.counter), "=qm" (c)
7500 : : "memory");
7501 return c != 0;
7502@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7503 {
7504 unsigned char c;
7505
7506- asm volatile(_ASM_ADD "%2,%0; sets %1"
7507+ asm volatile(_ASM_ADD "%2,%0\n"
7508+
7509+#ifdef CONFIG_PAX_REFCOUNT
7510+ "jno 0f\n"
7511+ _ASM_SUB "%2,%0\n"
7512+ "int $4\n0:\n"
7513+ _ASM_EXTABLE(0b, 0b)
7514+#endif
7515+
7516+ "sets %1\n"
7517 : "+m" (l->a.counter), "=qm" (c)
7518 : "ir" (i) : "memory");
7519 return c;
7520@@ -133,7 +201,15 @@ static inline long local_add_return(long
7521 #endif
7522 /* Modern 486+ processor */
7523 __i = i;
7524- asm volatile(_ASM_XADD "%0, %1;"
7525+ asm volatile(_ASM_XADD "%0, %1\n"
7526+
7527+#ifdef CONFIG_PAX_REFCOUNT
7528+ "jno 0f\n"
7529+ _ASM_MOV "%0,%1\n"
7530+ "int $4\n0:\n"
7531+ _ASM_EXTABLE(0b, 0b)
7532+#endif
7533+
7534 : "+r" (i), "+m" (l->a.counter)
7535 : : "memory");
7536 return i + __i;
7537diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7538--- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7539+++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7540@@ -5,4 +5,14 @@
7541
7542 #include <asm-generic/mman.h>
7543
7544+#ifdef __KERNEL__
7545+#ifndef __ASSEMBLY__
7546+#ifdef CONFIG_X86_32
7547+#define arch_mmap_check i386_mmap_check
7548+int i386_mmap_check(unsigned long addr, unsigned long len,
7549+ unsigned long flags);
7550+#endif
7551+#endif
7552+#endif
7553+
7554 #endif /* _ASM_X86_MMAN_H */
7555diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7556--- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7557+++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-05 19:44:33.000000000 -0400
7558@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7559
7560 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7561 {
7562+
7563+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7564+ unsigned int i;
7565+ pgd_t *pgd;
7566+
7567+ pax_open_kernel();
7568+ pgd = get_cpu_pgd(smp_processor_id());
7569+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7570+ if (paravirt_enabled())
7571+ set_pgd(pgd+i, native_make_pgd(0));
7572+ else
7573+ pgd[i] = native_make_pgd(0);
7574+ pax_close_kernel();
7575+#endif
7576+
7577 #ifdef CONFIG_SMP
7578 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7579 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7580@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7581 struct task_struct *tsk)
7582 {
7583 unsigned cpu = smp_processor_id();
7584+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
7585+ int tlbstate = TLBSTATE_OK;
7586+#endif
7587
7588 if (likely(prev != next)) {
7589 #ifdef CONFIG_SMP
7590+#ifdef CONFIG_X86_32
7591+ tlbstate = percpu_read(cpu_tlbstate.state);
7592+#endif
7593 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7594 percpu_write(cpu_tlbstate.active_mm, next);
7595 #endif
7596 cpumask_set_cpu(cpu, mm_cpumask(next));
7597
7598 /* Re-load page tables */
7599+#ifdef CONFIG_PAX_PER_CPU_PGD
7600+ pax_open_kernel();
7601+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7602+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7603+ pax_close_kernel();
7604+ load_cr3(get_cpu_pgd(cpu));
7605+#else
7606 load_cr3(next->pgd);
7607+#endif
7608
7609 /* stop flush ipis for the previous mm */
7610 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7611@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7612 */
7613 if (unlikely(prev->context.ldt != next->context.ldt))
7614 load_LDT_nolock(&next->context);
7615- }
7616+
7617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7618+ if (!(__supported_pte_mask & _PAGE_NX)) {
7619+ smp_mb__before_clear_bit();
7620+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7621+ smp_mb__after_clear_bit();
7622+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7623+ }
7624+#endif
7625+
7626+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7627+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7628+ prev->context.user_cs_limit != next->context.user_cs_limit))
7629+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7630 #ifdef CONFIG_SMP
7631+ else if (unlikely(tlbstate != TLBSTATE_OK))
7632+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7633+#endif
7634+#endif
7635+
7636+ }
7637 else {
7638+
7639+#ifdef CONFIG_PAX_PER_CPU_PGD
7640+ pax_open_kernel();
7641+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7642+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7643+ pax_close_kernel();
7644+ load_cr3(get_cpu_pgd(cpu));
7645+#endif
7646+
7647+#ifdef CONFIG_SMP
7648 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7649 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7650
7651@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7652 * tlb flush IPI delivery. We must reload CR3
7653 * to make sure to use no freed page tables.
7654 */
7655+
7656+#ifndef CONFIG_PAX_PER_CPU_PGD
7657 load_cr3(next->pgd);
7658+#endif
7659+
7660 load_LDT_nolock(&next->context);
7661+
7662+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7663+ if (!(__supported_pte_mask & _PAGE_NX))
7664+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7665+#endif
7666+
7667+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7668+#ifdef CONFIG_PAX_PAGEEXEC
7669+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7670+#endif
7671+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7672+#endif
7673+
7674 }
7675- }
7676 #endif
7677+ }
7678 }
7679
7680 #define activate_mm(prev, next) \
7681diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7682--- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7683+++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7684@@ -9,10 +9,22 @@
7685 * we put the segment information here.
7686 */
7687 typedef struct {
7688- void *ldt;
7689+ struct desc_struct *ldt;
7690 int size;
7691 struct mutex lock;
7692- void *vdso;
7693+ unsigned long vdso;
7694+
7695+#ifdef CONFIG_X86_32
7696+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7697+ unsigned long user_cs_base;
7698+ unsigned long user_cs_limit;
7699+
7700+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7701+ cpumask_t cpu_user_cs_mask;
7702+#endif
7703+
7704+#endif
7705+#endif
7706
7707 #ifdef CONFIG_X86_64
7708 /* True if mm supports a task running in 32 bit compatibility mode. */
7709diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7710--- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7711+++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7712@@ -5,6 +5,7 @@
7713
7714 #ifdef CONFIG_X86_64
7715 /* X86_64 does not define MODULE_PROC_FAMILY */
7716+#define MODULE_PROC_FAMILY ""
7717 #elif defined CONFIG_M386
7718 #define MODULE_PROC_FAMILY "386 "
7719 #elif defined CONFIG_M486
7720@@ -59,8 +60,30 @@
7721 #error unknown processor family
7722 #endif
7723
7724-#ifdef CONFIG_X86_32
7725-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7726+#ifdef CONFIG_PAX_MEMORY_UDEREF
7727+#define MODULE_PAX_UDEREF "UDEREF "
7728+#else
7729+#define MODULE_PAX_UDEREF ""
7730+#endif
7731+
7732+#ifdef CONFIG_PAX_KERNEXEC
7733+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7734+#else
7735+#define MODULE_PAX_KERNEXEC ""
7736 #endif
7737
7738+#ifdef CONFIG_PAX_REFCOUNT
7739+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7740+#else
7741+#define MODULE_PAX_REFCOUNT ""
7742+#endif
7743+
7744+#ifdef CONFIG_GRKERNSEC
7745+#define MODULE_GRSEC "GRSECURITY "
7746+#else
7747+#define MODULE_GRSEC ""
7748+#endif
7749+
7750+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7751+
7752 #endif /* _ASM_X86_MODULE_H */
7753diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7754--- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7755+++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7756@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7757
7758 /* duplicated to the one in bootmem.h */
7759 extern unsigned long max_pfn;
7760-extern unsigned long phys_base;
7761+extern const unsigned long phys_base;
7762
7763 extern unsigned long __phys_addr(unsigned long);
7764 #define __phys_reloc_hide(x) (x)
7765diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7766--- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7767+++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7768@@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7769 pv_mmu_ops.set_fixmap(idx, phys, flags);
7770 }
7771
7772+#ifdef CONFIG_PAX_KERNEXEC
7773+static inline unsigned long pax_open_kernel(void)
7774+{
7775+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7776+}
7777+
7778+static inline unsigned long pax_close_kernel(void)
7779+{
7780+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7781+}
7782+#else
7783+static inline unsigned long pax_open_kernel(void) { return 0; }
7784+static inline unsigned long pax_close_kernel(void) { return 0; }
7785+#endif
7786+
7787 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7788
7789 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7790@@ -955,7 +970,7 @@ extern void default_banner(void);
7791
7792 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7793 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7794-#define PARA_INDIRECT(addr) *%cs:addr
7795+#define PARA_INDIRECT(addr) *%ss:addr
7796 #endif
7797
7798 #define INTERRUPT_RETURN \
7799@@ -1032,6 +1047,21 @@ extern void default_banner(void);
7800 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7801 CLBR_NONE, \
7802 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7803+
7804+#define GET_CR0_INTO_RDI \
7805+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7806+ mov %rax,%rdi
7807+
7808+#define SET_RDI_INTO_CR0 \
7809+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7810+
7811+#define GET_CR3_INTO_RDI \
7812+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7813+ mov %rax,%rdi
7814+
7815+#define SET_RDI_INTO_CR3 \
7816+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7817+
7818 #endif /* CONFIG_X86_32 */
7819
7820 #endif /* __ASSEMBLY__ */
7821diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7822--- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7823+++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7824@@ -78,19 +78,19 @@ struct pv_init_ops {
7825 */
7826 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7827 unsigned long addr, unsigned len);
7828-};
7829+} __no_const;
7830
7831
7832 struct pv_lazy_ops {
7833 /* Set deferred update mode, used for batching operations. */
7834 void (*enter)(void);
7835 void (*leave)(void);
7836-};
7837+} __no_const;
7838
7839 struct pv_time_ops {
7840 unsigned long long (*sched_clock)(void);
7841 unsigned long (*get_tsc_khz)(void);
7842-};
7843+} __no_const;
7844
7845 struct pv_cpu_ops {
7846 /* hooks for various privileged instructions */
7847@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7848
7849 void (*start_context_switch)(struct task_struct *prev);
7850 void (*end_context_switch)(struct task_struct *next);
7851-};
7852+} __no_const;
7853
7854 struct pv_irq_ops {
7855 /*
7856@@ -217,7 +217,7 @@ struct pv_apic_ops {
7857 unsigned long start_eip,
7858 unsigned long start_esp);
7859 #endif
7860-};
7861+} __no_const;
7862
7863 struct pv_mmu_ops {
7864 unsigned long (*read_cr2)(void);
7865@@ -317,6 +317,12 @@ struct pv_mmu_ops {
7866 an mfn. We can tell which is which from the index. */
7867 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7868 phys_addr_t phys, pgprot_t flags);
7869+
7870+#ifdef CONFIG_PAX_KERNEXEC
7871+ unsigned long (*pax_open_kernel)(void);
7872+ unsigned long (*pax_close_kernel)(void);
7873+#endif
7874+
7875 };
7876
7877 struct arch_spinlock;
7878@@ -327,7 +333,7 @@ struct pv_lock_ops {
7879 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7880 int (*spin_trylock)(struct arch_spinlock *lock);
7881 void (*spin_unlock)(struct arch_spinlock *lock);
7882-};
7883+} __no_const;
7884
7885 /* This contains all the paravirt structures: we get a convenient
7886 * number for each function using the offset which we use to indicate
7887diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7888--- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7889+++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7890@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7891 pmd_t *pmd, pte_t *pte)
7892 {
7893 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7894+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7895+}
7896+
7897+static inline void pmd_populate_user(struct mm_struct *mm,
7898+ pmd_t *pmd, pte_t *pte)
7899+{
7900+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7901 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7902 }
7903
7904diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7905--- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7906+++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7907@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7908
7909 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7910 {
7911+ pax_open_kernel();
7912 *pmdp = pmd;
7913+ pax_close_kernel();
7914 }
7915
7916 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7917diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7918--- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7919+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7920@@ -25,9 +25,6 @@
7921 struct mm_struct;
7922 struct vm_area_struct;
7923
7924-extern pgd_t swapper_pg_dir[1024];
7925-extern pgd_t initial_page_table[1024];
7926-
7927 static inline void pgtable_cache_init(void) { }
7928 static inline void check_pgt_cache(void) { }
7929 void paging_init(void);
7930@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7931 # include <asm/pgtable-2level.h>
7932 #endif
7933
7934+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7935+extern pgd_t initial_page_table[PTRS_PER_PGD];
7936+#ifdef CONFIG_X86_PAE
7937+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7938+#endif
7939+
7940 #if defined(CONFIG_HIGHPTE)
7941 #define pte_offset_map(dir, address) \
7942 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7943@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7944 /* Clear a kernel PTE and flush it from the TLB */
7945 #define kpte_clear_flush(ptep, vaddr) \
7946 do { \
7947+ pax_open_kernel(); \
7948 pte_clear(&init_mm, (vaddr), (ptep)); \
7949+ pax_close_kernel(); \
7950 __flush_tlb_one((vaddr)); \
7951 } while (0)
7952
7953@@ -74,6 +79,9 @@ do { \
7954
7955 #endif /* !__ASSEMBLY__ */
7956
7957+#define HAVE_ARCH_UNMAPPED_AREA
7958+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7959+
7960 /*
7961 * kern_addr_valid() is (1) for FLATMEM and (0) for
7962 * SPARSEMEM and DISCONTIGMEM
7963diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7964--- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7965+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7966@@ -8,7 +8,7 @@
7967 */
7968 #ifdef CONFIG_X86_PAE
7969 # include <asm/pgtable-3level_types.h>
7970-# define PMD_SIZE (1UL << PMD_SHIFT)
7971+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7972 # define PMD_MASK (~(PMD_SIZE - 1))
7973 #else
7974 # include <asm/pgtable-2level_types.h>
7975@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7976 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7977 #endif
7978
7979+#ifdef CONFIG_PAX_KERNEXEC
7980+#ifndef __ASSEMBLY__
7981+extern unsigned char MODULES_EXEC_VADDR[];
7982+extern unsigned char MODULES_EXEC_END[];
7983+#endif
7984+#include <asm/boot.h>
7985+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7986+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7987+#else
7988+#define ktla_ktva(addr) (addr)
7989+#define ktva_ktla(addr) (addr)
7990+#endif
7991+
7992 #define MODULES_VADDR VMALLOC_START
7993 #define MODULES_END VMALLOC_END
7994 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
7995diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
7996--- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
7997+++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
7998@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
7999
8000 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8001 {
8002+ pax_open_kernel();
8003 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8004+ pax_close_kernel();
8005 }
8006
8007 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8008 {
8009+ pax_open_kernel();
8010 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8011+ pax_close_kernel();
8012 }
8013
8014 /*
8015diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8016--- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8017+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8018@@ -16,10 +16,13 @@
8019
8020 extern pud_t level3_kernel_pgt[512];
8021 extern pud_t level3_ident_pgt[512];
8022+extern pud_t level3_vmalloc_pgt[512];
8023+extern pud_t level3_vmemmap_pgt[512];
8024+extern pud_t level2_vmemmap_pgt[512];
8025 extern pmd_t level2_kernel_pgt[512];
8026 extern pmd_t level2_fixmap_pgt[512];
8027-extern pmd_t level2_ident_pgt[512];
8028-extern pgd_t init_level4_pgt[];
8029+extern pmd_t level2_ident_pgt[512*2];
8030+extern pgd_t init_level4_pgt[512];
8031
8032 #define swapper_pg_dir init_level4_pgt
8033
8034@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8035
8036 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8037 {
8038+ pax_open_kernel();
8039 *pmdp = pmd;
8040+ pax_close_kernel();
8041 }
8042
8043 static inline void native_pmd_clear(pmd_t *pmd)
8044@@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8045
8046 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8047 {
8048+ pax_open_kernel();
8049 *pgdp = pgd;
8050+ pax_close_kernel();
8051 }
8052
8053 static inline void native_pgd_clear(pgd_t *pgd)
8054diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8055--- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8056+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8057@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8058 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8059 #define MODULES_END _AC(0xffffffffff000000, UL)
8060 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8061+#define MODULES_EXEC_VADDR MODULES_VADDR
8062+#define MODULES_EXEC_END MODULES_END
8063+
8064+#define ktla_ktva(addr) (addr)
8065+#define ktva_ktla(addr) (addr)
8066
8067 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8068diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8069--- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8070+++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8071@@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8072
8073 #define arch_end_context_switch(prev) do {} while(0)
8074
8075+#define pax_open_kernel() native_pax_open_kernel()
8076+#define pax_close_kernel() native_pax_close_kernel()
8077 #endif /* CONFIG_PARAVIRT */
8078
8079+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8080+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8081+
8082+#ifdef CONFIG_PAX_KERNEXEC
8083+static inline unsigned long native_pax_open_kernel(void)
8084+{
8085+ unsigned long cr0;
8086+
8087+ preempt_disable();
8088+ barrier();
8089+ cr0 = read_cr0() ^ X86_CR0_WP;
8090+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8091+ write_cr0(cr0);
8092+ return cr0 ^ X86_CR0_WP;
8093+}
8094+
8095+static inline unsigned long native_pax_close_kernel(void)
8096+{
8097+ unsigned long cr0;
8098+
8099+ cr0 = read_cr0() ^ X86_CR0_WP;
8100+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8101+ write_cr0(cr0);
8102+ barrier();
8103+ preempt_enable_no_resched();
8104+ return cr0 ^ X86_CR0_WP;
8105+}
8106+#else
8107+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8108+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8109+#endif
8110+
8111 /*
8112 * The following only work if pte_present() is true.
8113 * Undefined behaviour if not..
8114 */
8115+static inline int pte_user(pte_t pte)
8116+{
8117+ return pte_val(pte) & _PAGE_USER;
8118+}
8119+
8120 static inline int pte_dirty(pte_t pte)
8121 {
8122 return pte_flags(pte) & _PAGE_DIRTY;
8123@@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8124 return pte_clear_flags(pte, _PAGE_RW);
8125 }
8126
8127+static inline pte_t pte_mkread(pte_t pte)
8128+{
8129+ return __pte(pte_val(pte) | _PAGE_USER);
8130+}
8131+
8132 static inline pte_t pte_mkexec(pte_t pte)
8133 {
8134- return pte_clear_flags(pte, _PAGE_NX);
8135+#ifdef CONFIG_X86_PAE
8136+ if (__supported_pte_mask & _PAGE_NX)
8137+ return pte_clear_flags(pte, _PAGE_NX);
8138+ else
8139+#endif
8140+ return pte_set_flags(pte, _PAGE_USER);
8141+}
8142+
8143+static inline pte_t pte_exprotect(pte_t pte)
8144+{
8145+#ifdef CONFIG_X86_PAE
8146+ if (__supported_pte_mask & _PAGE_NX)
8147+ return pte_set_flags(pte, _PAGE_NX);
8148+ else
8149+#endif
8150+ return pte_clear_flags(pte, _PAGE_USER);
8151 }
8152
8153 static inline pte_t pte_mkdirty(pte_t pte)
8154@@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8155 #endif
8156
8157 #ifndef __ASSEMBLY__
8158+
8159+#ifdef CONFIG_PAX_PER_CPU_PGD
8160+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8161+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8162+{
8163+ return cpu_pgd[cpu];
8164+}
8165+#endif
8166+
8167 #include <linux/mm_types.h>
8168
8169 static inline int pte_none(pte_t pte)
8170@@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8171
8172 static inline int pgd_bad(pgd_t pgd)
8173 {
8174- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8175+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8176 }
8177
8178 static inline int pgd_none(pgd_t pgd)
8179@@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8180 * pgd_offset() returns a (pgd_t *)
8181 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8182 */
8183-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8184+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8185+
8186+#ifdef CONFIG_PAX_PER_CPU_PGD
8187+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8188+#endif
8189+
8190 /*
8191 * a shortcut which implies the use of the kernel's pgd, instead
8192 * of a process's
8193@@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8194 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8195 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8196
8197+#ifdef CONFIG_X86_32
8198+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8199+#else
8200+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8201+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8202+
8203+#ifdef CONFIG_PAX_MEMORY_UDEREF
8204+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8205+#else
8206+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8207+#endif
8208+
8209+#endif
8210+
8211 #ifndef __ASSEMBLY__
8212
8213 extern int direct_gbpages;
8214@@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8215 * dst and src can be on the same page, but the range must not overlap,
8216 * and must not cross a page boundary.
8217 */
8218-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8219+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8220 {
8221- memcpy(dst, src, count * sizeof(pgd_t));
8222+ pax_open_kernel();
8223+ while (count--)
8224+ *dst++ = *src++;
8225+ pax_close_kernel();
8226 }
8227
8228+#ifdef CONFIG_PAX_PER_CPU_PGD
8229+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8230+#endif
8231+
8232+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8233+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8234+#else
8235+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8236+#endif
8237
8238 #include <asm-generic/pgtable.h>
8239 #endif /* __ASSEMBLY__ */
8240diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8241--- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8242+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8243@@ -16,13 +16,12 @@
8244 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8245 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8246 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8247-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8248+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8249 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8250 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8251 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8252-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8253-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8254-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8255+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8256+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8257 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8258
8259 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8260@@ -40,7 +39,6 @@
8261 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8262 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8263 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8264-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8265 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8266 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8267 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8268@@ -57,8 +55,10 @@
8269
8270 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8271 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8272-#else
8273+#elif defined(CONFIG_KMEMCHECK)
8274 #define _PAGE_NX (_AT(pteval_t, 0))
8275+#else
8276+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8277 #endif
8278
8279 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8280@@ -96,6 +96,9 @@
8281 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8282 _PAGE_ACCESSED)
8283
8284+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8285+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8286+
8287 #define __PAGE_KERNEL_EXEC \
8288 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8289 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8290@@ -106,8 +109,8 @@
8291 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8292 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8293 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8294-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8295-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8296+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8297+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8298 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8299 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8300 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8301@@ -166,8 +169,8 @@
8302 * bits are combined, this will alow user to access the high address mapped
8303 * VDSO in the presence of CONFIG_COMPAT_VDSO
8304 */
8305-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8306-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8307+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8308+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8309 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8310 #endif
8311
8312@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8313 {
8314 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8315 }
8316+#endif
8317
8318+#if PAGETABLE_LEVELS == 3
8319+#include <asm-generic/pgtable-nopud.h>
8320+#endif
8321+
8322+#if PAGETABLE_LEVELS == 2
8323+#include <asm-generic/pgtable-nopmd.h>
8324+#endif
8325+
8326+#ifndef __ASSEMBLY__
8327 #if PAGETABLE_LEVELS > 3
8328 typedef struct { pudval_t pud; } pud_t;
8329
8330@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8331 return pud.pud;
8332 }
8333 #else
8334-#include <asm-generic/pgtable-nopud.h>
8335-
8336 static inline pudval_t native_pud_val(pud_t pud)
8337 {
8338 return native_pgd_val(pud.pgd);
8339@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8340 return pmd.pmd;
8341 }
8342 #else
8343-#include <asm-generic/pgtable-nopmd.h>
8344-
8345 static inline pmdval_t native_pmd_val(pmd_t pmd)
8346 {
8347 return native_pgd_val(pmd.pud.pgd);
8348@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8349
8350 extern pteval_t __supported_pte_mask;
8351 extern void set_nx(void);
8352-extern int nx_enabled;
8353
8354 #define pgprot_writecombine pgprot_writecombine
8355 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8356diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8357--- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8358+++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8359@@ -266,7 +266,7 @@ struct tss_struct {
8360
8361 } ____cacheline_aligned;
8362
8363-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8364+extern struct tss_struct init_tss[NR_CPUS];
8365
8366 /*
8367 * Save the original ist values for checking stack pointers during debugging
8368@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8369 */
8370 #define TASK_SIZE PAGE_OFFSET
8371 #define TASK_SIZE_MAX TASK_SIZE
8372+
8373+#ifdef CONFIG_PAX_SEGMEXEC
8374+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8375+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8376+#else
8377 #define STACK_TOP TASK_SIZE
8378-#define STACK_TOP_MAX STACK_TOP
8379+#endif
8380+
8381+#define STACK_TOP_MAX TASK_SIZE
8382
8383 #define INIT_THREAD { \
8384- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8385+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8386 .vm86_info = NULL, \
8387 .sysenter_cs = __KERNEL_CS, \
8388 .io_bitmap_ptr = NULL, \
8389@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define INIT_TSS { \
8392 .x86_tss = { \
8393- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8394+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8395 .ss0 = __KERNEL_DS, \
8396 .ss1 = __KERNEL_CS, \
8397 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8398@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8399 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8400
8401 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8402-#define KSTK_TOP(info) \
8403-({ \
8404- unsigned long *__ptr = (unsigned long *)(info); \
8405- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8406-})
8407+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8408
8409 /*
8410 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8411@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8412 #define task_pt_regs(task) \
8413 ({ \
8414 struct pt_regs *__regs__; \
8415- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8416+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8417 __regs__ - 1; \
8418 })
8419
8420@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8421 /*
8422 * User space process size. 47bits minus one guard page.
8423 */
8424-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8425+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8426
8427 /* This decides where the kernel will search for a free chunk of vm
8428 * space during mmap's.
8429 */
8430 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8431- 0xc0000000 : 0xFFFFe000)
8432+ 0xc0000000 : 0xFFFFf000)
8433
8434 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8435 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8436@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8437 #define STACK_TOP_MAX TASK_SIZE_MAX
8438
8439 #define INIT_THREAD { \
8440- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8441+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8442 }
8443
8444 #define INIT_TSS { \
8445- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8446+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8447 }
8448
8449 /*
8450@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8451 */
8452 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8453
8454+#ifdef CONFIG_PAX_SEGMEXEC
8455+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8456+#endif
8457+
8458 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8459
8460 /* Get/set a process' ability to use the timestamp counter instruction */
8461diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8462--- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8463+++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8464@@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8465 }
8466
8467 /*
8468- * user_mode_vm(regs) determines whether a register set came from user mode.
8469+ * user_mode(regs) determines whether a register set came from user mode.
8470 * This is true if V8086 mode was enabled OR if the register set was from
8471 * protected mode with RPL-3 CS value. This tricky test checks that with
8472 * one comparison. Many places in the kernel can bypass this full check
8473- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8474+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8475+ * be used.
8476 */
8477-static inline int user_mode(struct pt_regs *regs)
8478+static inline int user_mode_novm(struct pt_regs *regs)
8479 {
8480 #ifdef CONFIG_X86_32
8481 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8482 #else
8483- return !!(regs->cs & 3);
8484+ return !!(regs->cs & SEGMENT_RPL_MASK);
8485 #endif
8486 }
8487
8488-static inline int user_mode_vm(struct pt_regs *regs)
8489+static inline int user_mode(struct pt_regs *regs)
8490 {
8491 #ifdef CONFIG_X86_32
8492 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8493 USER_RPL;
8494 #else
8495- return user_mode(regs);
8496+ return user_mode_novm(regs);
8497 #endif
8498 }
8499
8500diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8501--- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8502+++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8503@@ -6,19 +6,19 @@
8504 struct pt_regs;
8505
8506 struct machine_ops {
8507- void (*restart)(char *cmd);
8508- void (*halt)(void);
8509- void (*power_off)(void);
8510+ void (* __noreturn restart)(char *cmd);
8511+ void (* __noreturn halt)(void);
8512+ void (* __noreturn power_off)(void);
8513 void (*shutdown)(void);
8514 void (*crash_shutdown)(struct pt_regs *);
8515- void (*emergency_restart)(void);
8516-};
8517+ void (* __noreturn emergency_restart)(void);
8518+} __no_const;
8519
8520 extern struct machine_ops machine_ops;
8521
8522 void native_machine_crash_shutdown(struct pt_regs *regs);
8523 void native_machine_shutdown(void);
8524-void machine_real_restart(unsigned int type);
8525+void machine_real_restart(unsigned int type) __noreturn;
8526 /* These must match dispatch_table in reboot_32.S */
8527 #define MRR_BIOS 0
8528 #define MRR_APM 1
8529diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8530--- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8531+++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8532@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8533 {
8534 asm volatile("# beginning down_read\n\t"
8535 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8536+
8537+#ifdef CONFIG_PAX_REFCOUNT
8538+ "jno 0f\n"
8539+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8540+ "int $4\n0:\n"
8541+ _ASM_EXTABLE(0b, 0b)
8542+#endif
8543+
8544 /* adds 0x00000001 */
8545 " jns 1f\n"
8546 " call call_rwsem_down_read_failed\n"
8547@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8548 "1:\n\t"
8549 " mov %1,%2\n\t"
8550 " add %3,%2\n\t"
8551+
8552+#ifdef CONFIG_PAX_REFCOUNT
8553+ "jno 0f\n"
8554+ "sub %3,%2\n"
8555+ "int $4\n0:\n"
8556+ _ASM_EXTABLE(0b, 0b)
8557+#endif
8558+
8559 " jle 2f\n\t"
8560 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8561 " jnz 1b\n\t"
8562@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8563 long tmp;
8564 asm volatile("# beginning down_write\n\t"
8565 LOCK_PREFIX " xadd %1,(%2)\n\t"
8566+
8567+#ifdef CONFIG_PAX_REFCOUNT
8568+ "jno 0f\n"
8569+ "mov %1,(%2)\n"
8570+ "int $4\n0:\n"
8571+ _ASM_EXTABLE(0b, 0b)
8572+#endif
8573+
8574 /* adds 0xffff0001, returns the old value */
8575 " test %1,%1\n\t"
8576 /* was the count 0 before? */
8577@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8578 long tmp;
8579 asm volatile("# beginning __up_read\n\t"
8580 LOCK_PREFIX " xadd %1,(%2)\n\t"
8581+
8582+#ifdef CONFIG_PAX_REFCOUNT
8583+ "jno 0f\n"
8584+ "mov %1,(%2)\n"
8585+ "int $4\n0:\n"
8586+ _ASM_EXTABLE(0b, 0b)
8587+#endif
8588+
8589 /* subtracts 1, returns the old value */
8590 " jns 1f\n\t"
8591 " call call_rwsem_wake\n" /* expects old value in %edx */
8592@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8593 long tmp;
8594 asm volatile("# beginning __up_write\n\t"
8595 LOCK_PREFIX " xadd %1,(%2)\n\t"
8596+
8597+#ifdef CONFIG_PAX_REFCOUNT
8598+ "jno 0f\n"
8599+ "mov %1,(%2)\n"
8600+ "int $4\n0:\n"
8601+ _ASM_EXTABLE(0b, 0b)
8602+#endif
8603+
8604 /* subtracts 0xffff0001, returns the old value */
8605 " jns 1f\n\t"
8606 " call call_rwsem_wake\n" /* expects old value in %edx */
8607@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8608 {
8609 asm volatile("# beginning __downgrade_write\n\t"
8610 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8611+
8612+#ifdef CONFIG_PAX_REFCOUNT
8613+ "jno 0f\n"
8614+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8615+ "int $4\n0:\n"
8616+ _ASM_EXTABLE(0b, 0b)
8617+#endif
8618+
8619 /*
8620 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8621 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8622@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8623 */
8624 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8625 {
8626- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8627+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8628+
8629+#ifdef CONFIG_PAX_REFCOUNT
8630+ "jno 0f\n"
8631+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8632+ "int $4\n0:\n"
8633+ _ASM_EXTABLE(0b, 0b)
8634+#endif
8635+
8636 : "+m" (sem->count)
8637 : "er" (delta));
8638 }
8639@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8640 {
8641 long tmp = delta;
8642
8643- asm volatile(LOCK_PREFIX "xadd %0,%1"
8644+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8645+
8646+#ifdef CONFIG_PAX_REFCOUNT
8647+ "jno 0f\n"
8648+ "mov %0,%1\n"
8649+ "int $4\n0:\n"
8650+ _ASM_EXTABLE(0b, 0b)
8651+#endif
8652+
8653 : "+r" (tmp), "+m" (sem->count)
8654 : : "memory");
8655
8656diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8657--- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8658+++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8659@@ -64,8 +64,8 @@
8660 * 26 - ESPFIX small SS
8661 * 27 - per-cpu [ offset to per-cpu data area ]
8662 * 28 - stack_canary-20 [ for stack protector ]
8663- * 29 - unused
8664- * 30 - unused
8665+ * 29 - PCI BIOS CS
8666+ * 30 - PCI BIOS DS
8667 * 31 - TSS for double fault handler
8668 */
8669 #define GDT_ENTRY_TLS_MIN 6
8670@@ -79,6 +79,8 @@
8671
8672 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8673
8674+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8675+
8676 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8677
8678 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8679@@ -104,6 +106,12 @@
8680 #define __KERNEL_STACK_CANARY 0
8681 #endif
8682
8683+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8684+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8685+
8686+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8687+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8688+
8689 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8690
8691 /*
8692@@ -141,7 +149,7 @@
8693 */
8694
8695 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8696-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8697+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8698
8699
8700 #else
8701@@ -165,6 +173,8 @@
8702 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8703 #define __USER32_DS __USER_DS
8704
8705+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8706+
8707 #define GDT_ENTRY_TSS 8 /* needs two entries */
8708 #define GDT_ENTRY_LDT 10 /* needs two entries */
8709 #define GDT_ENTRY_TLS_MIN 12
8710@@ -185,6 +195,7 @@
8711 #endif
8712
8713 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8714+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8715 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8716 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8717 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8718diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8719--- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8720+++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8721@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8722 /* cpus sharing the last level cache: */
8723 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8724 DECLARE_PER_CPU(u16, cpu_llc_id);
8725-DECLARE_PER_CPU(int, cpu_number);
8726+DECLARE_PER_CPU(unsigned int, cpu_number);
8727
8728 static inline struct cpumask *cpu_sibling_mask(int cpu)
8729 {
8730@@ -77,7 +77,7 @@ struct smp_ops {
8731
8732 void (*send_call_func_ipi)(const struct cpumask *mask);
8733 void (*send_call_func_single_ipi)(int cpu);
8734-};
8735+} __no_const;
8736
8737 /* Globals due to paravirt */
8738 extern void set_cpu_sibling_map(int cpu);
8739@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8740 extern int safe_smp_processor_id(void);
8741
8742 #elif defined(CONFIG_X86_64_SMP)
8743-#define raw_smp_processor_id() (percpu_read(cpu_number))
8744-
8745-#define stack_smp_processor_id() \
8746-({ \
8747- struct thread_info *ti; \
8748- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8749- ti->cpu; \
8750-})
8751+#define raw_smp_processor_id() (percpu_read(cpu_number))
8752+#define stack_smp_processor_id() raw_smp_processor_id()
8753 #define safe_smp_processor_id() smp_processor_id()
8754
8755 #endif
8756diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8757--- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8758+++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8759@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8760 static inline void arch_read_lock(arch_rwlock_t *rw)
8761 {
8762 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8763+
8764+#ifdef CONFIG_PAX_REFCOUNT
8765+ "jno 0f\n"
8766+ LOCK_PREFIX " addl $1,(%0)\n"
8767+ "int $4\n0:\n"
8768+ _ASM_EXTABLE(0b, 0b)
8769+#endif
8770+
8771 "jns 1f\n"
8772 "call __read_lock_failed\n\t"
8773 "1:\n"
8774@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8775 static inline void arch_write_lock(arch_rwlock_t *rw)
8776 {
8777 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8778+
8779+#ifdef CONFIG_PAX_REFCOUNT
8780+ "jno 0f\n"
8781+ LOCK_PREFIX " addl %1,(%0)\n"
8782+ "int $4\n0:\n"
8783+ _ASM_EXTABLE(0b, 0b)
8784+#endif
8785+
8786 "jz 1f\n"
8787 "call __write_lock_failed\n\t"
8788 "1:\n"
8789@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8790
8791 static inline void arch_read_unlock(arch_rwlock_t *rw)
8792 {
8793- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8794+ asm volatile(LOCK_PREFIX "incl %0\n"
8795+
8796+#ifdef CONFIG_PAX_REFCOUNT
8797+ "jno 0f\n"
8798+ LOCK_PREFIX "decl %0\n"
8799+ "int $4\n0:\n"
8800+ _ASM_EXTABLE(0b, 0b)
8801+#endif
8802+
8803+ :"+m" (rw->lock) : : "memory");
8804 }
8805
8806 static inline void arch_write_unlock(arch_rwlock_t *rw)
8807 {
8808- asm volatile(LOCK_PREFIX "addl %1, %0"
8809+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8810+
8811+#ifdef CONFIG_PAX_REFCOUNT
8812+ "jno 0f\n"
8813+ LOCK_PREFIX "subl %1, %0\n"
8814+ "int $4\n0:\n"
8815+ _ASM_EXTABLE(0b, 0b)
8816+#endif
8817+
8818 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8819 }
8820
8821diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8822--- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8823+++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8824@@ -48,7 +48,7 @@
8825 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8826 */
8827 #define GDT_STACK_CANARY_INIT \
8828- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8829+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8830
8831 /*
8832 * Initialize the stackprotector canary value.
8833@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8834
8835 static inline void load_stack_canary_segment(void)
8836 {
8837-#ifdef CONFIG_X86_32
8838+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8839 asm volatile ("mov %0, %%gs" : : "r" (0));
8840 #endif
8841 }
8842diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8843--- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8844+++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8845@@ -11,28 +11,20 @@
8846
8847 extern int kstack_depth_to_print;
8848
8849-struct thread_info;
8850+struct task_struct;
8851 struct stacktrace_ops;
8852
8853-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8854- unsigned long *stack,
8855- unsigned long bp,
8856- const struct stacktrace_ops *ops,
8857- void *data,
8858- unsigned long *end,
8859- int *graph);
8860-
8861-extern unsigned long
8862-print_context_stack(struct thread_info *tinfo,
8863- unsigned long *stack, unsigned long bp,
8864- const struct stacktrace_ops *ops, void *data,
8865- unsigned long *end, int *graph);
8866-
8867-extern unsigned long
8868-print_context_stack_bp(struct thread_info *tinfo,
8869- unsigned long *stack, unsigned long bp,
8870- const struct stacktrace_ops *ops, void *data,
8871- unsigned long *end, int *graph);
8872+typedef unsigned long walk_stack_t(struct task_struct *task,
8873+ void *stack_start,
8874+ unsigned long *stack,
8875+ unsigned long bp,
8876+ const struct stacktrace_ops *ops,
8877+ void *data,
8878+ unsigned long *end,
8879+ int *graph);
8880+
8881+extern walk_stack_t print_context_stack;
8882+extern walk_stack_t print_context_stack_bp;
8883
8884 /* Generic stack tracer with callbacks */
8885
8886@@ -43,7 +35,7 @@ struct stacktrace_ops {
8887 void (*address)(void *data, unsigned long address, int reliable);
8888 /* On negative return stop dumping */
8889 int (*stack)(void *data, char *name);
8890- walk_stack_t walk_stack;
8891+ walk_stack_t *walk_stack;
8892 };
8893
8894 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8895diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8896--- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8897+++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8898@@ -129,7 +129,7 @@ do { \
8899 "call __switch_to\n\t" \
8900 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8901 __switch_canary \
8902- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8903+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8904 "movq %%rax,%%rdi\n\t" \
8905 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8906 "jnz ret_from_fork\n\t" \
8907@@ -140,7 +140,7 @@ do { \
8908 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8909 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8910 [_tif_fork] "i" (_TIF_FORK), \
8911- [thread_info] "i" (offsetof(struct task_struct, stack)), \
8912+ [thread_info] "m" (current_tinfo), \
8913 [current_task] "m" (current_task) \
8914 __switch_canary_iparam \
8915 : "memory", "cc" __EXTRA_CLOBBER)
8916@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8917 {
8918 unsigned long __limit;
8919 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8920- return __limit + 1;
8921+ return __limit;
8922 }
8923
8924 static inline void native_clts(void)
8925@@ -340,12 +340,12 @@ void enable_hlt(void);
8926
8927 void cpu_idle_wait(void);
8928
8929-extern unsigned long arch_align_stack(unsigned long sp);
8930+#define arch_align_stack(x) ((x) & ~0xfUL)
8931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8932
8933 void default_idle(void);
8934
8935-void stop_this_cpu(void *dummy);
8936+void stop_this_cpu(void *dummy) __noreturn;
8937
8938 /*
8939 * Force strict CPU ordering.
8940diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8941--- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8942+++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8943@@ -10,6 +10,7 @@
8944 #include <linux/compiler.h>
8945 #include <asm/page.h>
8946 #include <asm/types.h>
8947+#include <asm/percpu.h>
8948
8949 /*
8950 * low level task data that entry.S needs immediate access to
8951@@ -24,7 +25,6 @@ struct exec_domain;
8952 #include <asm/atomic.h>
8953
8954 struct thread_info {
8955- struct task_struct *task; /* main task structure */
8956 struct exec_domain *exec_domain; /* execution domain */
8957 __u32 flags; /* low level flags */
8958 __u32 status; /* thread synchronous flags */
8959@@ -34,18 +34,12 @@ struct thread_info {
8960 mm_segment_t addr_limit;
8961 struct restart_block restart_block;
8962 void __user *sysenter_return;
8963-#ifdef CONFIG_X86_32
8964- unsigned long previous_esp; /* ESP of the previous stack in
8965- case of nested (IRQ) stacks
8966- */
8967- __u8 supervisor_stack[0];
8968-#endif
8969+ unsigned long lowest_stack;
8970 int uaccess_err;
8971 };
8972
8973-#define INIT_THREAD_INFO(tsk) \
8974+#define INIT_THREAD_INFO \
8975 { \
8976- .task = &tsk, \
8977 .exec_domain = &default_exec_domain, \
8978 .flags = 0, \
8979 .cpu = 0, \
8980@@ -56,7 +50,7 @@ struct thread_info {
8981 }, \
8982 }
8983
8984-#define init_thread_info (init_thread_union.thread_info)
8985+#define init_thread_info (init_thread_union.stack)
8986 #define init_stack (init_thread_union.stack)
8987
8988 #else /* !__ASSEMBLY__ */
8989@@ -170,6 +164,23 @@ struct thread_info {
8990 ret; \
8991 })
8992
8993+#ifdef __ASSEMBLY__
8994+/* how to get the thread information struct from ASM */
8995+#define GET_THREAD_INFO(reg) \
8996+ mov PER_CPU_VAR(current_tinfo), reg
8997+
8998+/* use this one if reg already contains %esp */
8999+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9000+#else
9001+/* how to get the thread information struct from C */
9002+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9003+
9004+static __always_inline struct thread_info *current_thread_info(void)
9005+{
9006+ return percpu_read_stable(current_tinfo);
9007+}
9008+#endif
9009+
9010 #ifdef CONFIG_X86_32
9011
9012 #define STACK_WARN (THREAD_SIZE/8)
9013@@ -180,35 +191,13 @@ struct thread_info {
9014 */
9015 #ifndef __ASSEMBLY__
9016
9017-
9018 /* how to get the current stack pointer from C */
9019 register unsigned long current_stack_pointer asm("esp") __used;
9020
9021-/* how to get the thread information struct from C */
9022-static inline struct thread_info *current_thread_info(void)
9023-{
9024- return (struct thread_info *)
9025- (current_stack_pointer & ~(THREAD_SIZE - 1));
9026-}
9027-
9028-#else /* !__ASSEMBLY__ */
9029-
9030-/* how to get the thread information struct from ASM */
9031-#define GET_THREAD_INFO(reg) \
9032- movl $-THREAD_SIZE, reg; \
9033- andl %esp, reg
9034-
9035-/* use this one if reg already contains %esp */
9036-#define GET_THREAD_INFO_WITH_ESP(reg) \
9037- andl $-THREAD_SIZE, reg
9038-
9039 #endif
9040
9041 #else /* X86_32 */
9042
9043-#include <asm/percpu.h>
9044-#define KERNEL_STACK_OFFSET (5*8)
9045-
9046 /*
9047 * macros/functions for gaining access to the thread information structure
9048 * preempt_count needs to be 1 initially, until the scheduler is functional.
9049@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9050 #ifndef __ASSEMBLY__
9051 DECLARE_PER_CPU(unsigned long, kernel_stack);
9052
9053-static inline struct thread_info *current_thread_info(void)
9054-{
9055- struct thread_info *ti;
9056- ti = (void *)(percpu_read_stable(kernel_stack) +
9057- KERNEL_STACK_OFFSET - THREAD_SIZE);
9058- return ti;
9059-}
9060-
9061-#else /* !__ASSEMBLY__ */
9062-
9063-/* how to get the thread information struct from ASM */
9064-#define GET_THREAD_INFO(reg) \
9065- movq PER_CPU_VAR(kernel_stack),reg ; \
9066- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9067-
9068+/* how to get the current stack pointer from C */
9069+register unsigned long current_stack_pointer asm("rsp") __used;
9070 #endif
9071
9072 #endif /* !X86_32 */
9073@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9074 extern void free_thread_info(struct thread_info *ti);
9075 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9076 #define arch_task_cache_init arch_task_cache_init
9077+
9078+#define __HAVE_THREAD_FUNCTIONS
9079+#define task_thread_info(task) (&(task)->tinfo)
9080+#define task_stack_page(task) ((task)->stack)
9081+#define setup_thread_stack(p, org) do {} while (0)
9082+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9083+
9084+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9085+extern struct task_struct *alloc_task_struct_node(int node);
9086+extern void free_task_struct(struct task_struct *);
9087+
9088 #endif
9089 #endif /* _ASM_X86_THREAD_INFO_H */
9090diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9091--- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9092+++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9093@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9094 static __always_inline unsigned long __must_check
9095 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9096 {
9097+ pax_track_stack();
9098+
9099+ if ((long)n < 0)
9100+ return n;
9101+
9102 if (__builtin_constant_p(n)) {
9103 unsigned long ret;
9104
9105@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9106 return ret;
9107 }
9108 }
9109+ if (!__builtin_constant_p(n))
9110+ check_object_size(from, n, true);
9111 return __copy_to_user_ll(to, from, n);
9112 }
9113
9114@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9115 __copy_to_user(void __user *to, const void *from, unsigned long n)
9116 {
9117 might_fault();
9118+
9119 return __copy_to_user_inatomic(to, from, n);
9120 }
9121
9122 static __always_inline unsigned long
9123 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9124 {
9125+ if ((long)n < 0)
9126+ return n;
9127+
9128 /* Avoid zeroing the tail if the copy fails..
9129 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9130 * but as the zeroing behaviour is only significant when n is not
9131@@ -138,6 +149,12 @@ static __always_inline unsigned long
9132 __copy_from_user(void *to, const void __user *from, unsigned long n)
9133 {
9134 might_fault();
9135+
9136+ pax_track_stack();
9137+
9138+ if ((long)n < 0)
9139+ return n;
9140+
9141 if (__builtin_constant_p(n)) {
9142 unsigned long ret;
9143
9144@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9145 return ret;
9146 }
9147 }
9148+ if (!__builtin_constant_p(n))
9149+ check_object_size(to, n, false);
9150 return __copy_from_user_ll(to, from, n);
9151 }
9152
9153@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9154 const void __user *from, unsigned long n)
9155 {
9156 might_fault();
9157+
9158+ if ((long)n < 0)
9159+ return n;
9160+
9161 if (__builtin_constant_p(n)) {
9162 unsigned long ret;
9163
9164@@ -182,15 +205,19 @@ static __always_inline unsigned long
9165 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9166 unsigned long n)
9167 {
9168- return __copy_from_user_ll_nocache_nozero(to, from, n);
9169-}
9170+ if ((long)n < 0)
9171+ return n;
9172
9173-unsigned long __must_check copy_to_user(void __user *to,
9174- const void *from, unsigned long n);
9175-unsigned long __must_check _copy_from_user(void *to,
9176- const void __user *from,
9177- unsigned long n);
9178+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9179+}
9180
9181+extern void copy_to_user_overflow(void)
9182+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9183+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9184+#else
9185+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9186+#endif
9187+;
9188
9189 extern void copy_from_user_overflow(void)
9190 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9191@@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9192 #endif
9193 ;
9194
9195-static inline unsigned long __must_check copy_from_user(void *to,
9196- const void __user *from,
9197- unsigned long n)
9198+/**
9199+ * copy_to_user: - Copy a block of data into user space.
9200+ * @to: Destination address, in user space.
9201+ * @from: Source address, in kernel space.
9202+ * @n: Number of bytes to copy.
9203+ *
9204+ * Context: User context only. This function may sleep.
9205+ *
9206+ * Copy data from kernel space to user space.
9207+ *
9208+ * Returns number of bytes that could not be copied.
9209+ * On success, this will be zero.
9210+ */
9211+static inline unsigned long __must_check
9212+copy_to_user(void __user *to, const void *from, unsigned long n)
9213+{
9214+ int sz = __compiletime_object_size(from);
9215+
9216+ if (unlikely(sz != -1 && sz < n))
9217+ copy_to_user_overflow();
9218+ else if (access_ok(VERIFY_WRITE, to, n))
9219+ n = __copy_to_user(to, from, n);
9220+ return n;
9221+}
9222+
9223+/**
9224+ * copy_from_user: - Copy a block of data from user space.
9225+ * @to: Destination address, in kernel space.
9226+ * @from: Source address, in user space.
9227+ * @n: Number of bytes to copy.
9228+ *
9229+ * Context: User context only. This function may sleep.
9230+ *
9231+ * Copy data from user space to kernel space.
9232+ *
9233+ * Returns number of bytes that could not be copied.
9234+ * On success, this will be zero.
9235+ *
9236+ * If some data could not be copied, this function will pad the copied
9237+ * data to the requested size using zero bytes.
9238+ */
9239+static inline unsigned long __must_check
9240+copy_from_user(void *to, const void __user *from, unsigned long n)
9241 {
9242 int sz = __compiletime_object_size(to);
9243
9244- if (likely(sz == -1 || sz >= n))
9245- n = _copy_from_user(to, from, n);
9246- else
9247+ if (unlikely(sz != -1 && sz < n))
9248 copy_from_user_overflow();
9249-
9250+ else if (access_ok(VERIFY_READ, from, n))
9251+ n = __copy_from_user(to, from, n);
9252+ else if ((long)n > 0) {
9253+ if (!__builtin_constant_p(n))
9254+ check_object_size(to, n, false);
9255+ memset(to, 0, n);
9256+ }
9257 return n;
9258 }
9259
9260diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9261--- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9262+++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9263@@ -11,6 +11,9 @@
9264 #include <asm/alternative.h>
9265 #include <asm/cpufeature.h>
9266 #include <asm/page.h>
9267+#include <asm/pgtable.h>
9268+
9269+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9270
9271 /*
9272 * Copy To/From Userspace
9273@@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9274 return ret;
9275 }
9276
9277-__must_check unsigned long
9278-_copy_to_user(void __user *to, const void *from, unsigned len);
9279-__must_check unsigned long
9280-_copy_from_user(void *to, const void __user *from, unsigned len);
9281+static __always_inline __must_check unsigned long
9282+__copy_to_user(void __user *to, const void *from, unsigned len);
9283+static __always_inline __must_check unsigned long
9284+__copy_from_user(void *to, const void __user *from, unsigned len);
9285 __must_check unsigned long
9286 copy_in_user(void __user *to, const void __user *from, unsigned len);
9287
9288 static inline unsigned long __must_check copy_from_user(void *to,
9289 const void __user *from,
9290- unsigned long n)
9291+ unsigned n)
9292 {
9293- int sz = __compiletime_object_size(to);
9294-
9295 might_fault();
9296- if (likely(sz == -1 || sz >= n))
9297- n = _copy_from_user(to, from, n);
9298-#ifdef CONFIG_DEBUG_VM
9299- else
9300- WARN(1, "Buffer overflow detected!\n");
9301-#endif
9302+
9303+ if (access_ok(VERIFY_READ, from, n))
9304+ n = __copy_from_user(to, from, n);
9305+ else if ((int)n > 0) {
9306+ if (!__builtin_constant_p(n))
9307+ check_object_size(to, n, false);
9308+ memset(to, 0, n);
9309+ }
9310 return n;
9311 }
9312
9313@@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9314 {
9315 might_fault();
9316
9317- return _copy_to_user(dst, src, size);
9318+ if (access_ok(VERIFY_WRITE, dst, size))
9319+ size = __copy_to_user(dst, src, size);
9320+ return size;
9321 }
9322
9323 static __always_inline __must_check
9324-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9325+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9326 {
9327- int ret = 0;
9328+ int sz = __compiletime_object_size(dst);
9329+ unsigned ret = 0;
9330
9331 might_fault();
9332- if (!__builtin_constant_p(size))
9333- return copy_user_generic(dst, (__force void *)src, size);
9334+
9335+ pax_track_stack();
9336+
9337+ if ((int)size < 0)
9338+ return size;
9339+
9340+#ifdef CONFIG_PAX_MEMORY_UDEREF
9341+ if (!__access_ok(VERIFY_READ, src, size))
9342+ return size;
9343+#endif
9344+
9345+ if (unlikely(sz != -1 && sz < size)) {
9346+#ifdef CONFIG_DEBUG_VM
9347+ WARN(1, "Buffer overflow detected!\n");
9348+#endif
9349+ return size;
9350+ }
9351+
9352+ if (!__builtin_constant_p(size)) {
9353+ check_object_size(dst, size, false);
9354+
9355+#ifdef CONFIG_PAX_MEMORY_UDEREF
9356+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9357+ src += PAX_USER_SHADOW_BASE;
9358+#endif
9359+
9360+ return copy_user_generic(dst, (__force const void *)src, size);
9361+ }
9362 switch (size) {
9363- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9364+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9365 ret, "b", "b", "=q", 1);
9366 return ret;
9367- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9368+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9369 ret, "w", "w", "=r", 2);
9370 return ret;
9371- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9372+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9373 ret, "l", "k", "=r", 4);
9374 return ret;
9375- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9376+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9377 ret, "q", "", "=r", 8);
9378 return ret;
9379 case 10:
9380- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9381+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9382 ret, "q", "", "=r", 10);
9383 if (unlikely(ret))
9384 return ret;
9385 __get_user_asm(*(u16 *)(8 + (char *)dst),
9386- (u16 __user *)(8 + (char __user *)src),
9387+ (const u16 __user *)(8 + (const char __user *)src),
9388 ret, "w", "w", "=r", 2);
9389 return ret;
9390 case 16:
9391- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9392+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9393 ret, "q", "", "=r", 16);
9394 if (unlikely(ret))
9395 return ret;
9396 __get_user_asm(*(u64 *)(8 + (char *)dst),
9397- (u64 __user *)(8 + (char __user *)src),
9398+ (const u64 __user *)(8 + (const char __user *)src),
9399 ret, "q", "", "=r", 8);
9400 return ret;
9401 default:
9402- return copy_user_generic(dst, (__force void *)src, size);
9403+
9404+#ifdef CONFIG_PAX_MEMORY_UDEREF
9405+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9406+ src += PAX_USER_SHADOW_BASE;
9407+#endif
9408+
9409+ return copy_user_generic(dst, (__force const void *)src, size);
9410 }
9411 }
9412
9413 static __always_inline __must_check
9414-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9415+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9416 {
9417- int ret = 0;
9418+ int sz = __compiletime_object_size(src);
9419+ unsigned ret = 0;
9420
9421 might_fault();
9422- if (!__builtin_constant_p(size))
9423+
9424+ pax_track_stack();
9425+
9426+ if ((int)size < 0)
9427+ return size;
9428+
9429+#ifdef CONFIG_PAX_MEMORY_UDEREF
9430+ if (!__access_ok(VERIFY_WRITE, dst, size))
9431+ return size;
9432+#endif
9433+
9434+ if (unlikely(sz != -1 && sz < size)) {
9435+#ifdef CONFIG_DEBUG_VM
9436+ WARN(1, "Buffer overflow detected!\n");
9437+#endif
9438+ return size;
9439+ }
9440+
9441+ if (!__builtin_constant_p(size)) {
9442+ check_object_size(src, size, true);
9443+
9444+#ifdef CONFIG_PAX_MEMORY_UDEREF
9445+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9446+ dst += PAX_USER_SHADOW_BASE;
9447+#endif
9448+
9449 return copy_user_generic((__force void *)dst, src, size);
9450+ }
9451 switch (size) {
9452- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9453+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9454 ret, "b", "b", "iq", 1);
9455 return ret;
9456- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9457+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9458 ret, "w", "w", "ir", 2);
9459 return ret;
9460- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9461+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9462 ret, "l", "k", "ir", 4);
9463 return ret;
9464- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9465+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9466 ret, "q", "", "er", 8);
9467 return ret;
9468 case 10:
9469- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9470+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9471 ret, "q", "", "er", 10);
9472 if (unlikely(ret))
9473 return ret;
9474 asm("":::"memory");
9475- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9476+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9477 ret, "w", "w", "ir", 2);
9478 return ret;
9479 case 16:
9480- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9481+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9482 ret, "q", "", "er", 16);
9483 if (unlikely(ret))
9484 return ret;
9485 asm("":::"memory");
9486- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9487+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9488 ret, "q", "", "er", 8);
9489 return ret;
9490 default:
9491+
9492+#ifdef CONFIG_PAX_MEMORY_UDEREF
9493+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9494+ dst += PAX_USER_SHADOW_BASE;
9495+#endif
9496+
9497 return copy_user_generic((__force void *)dst, src, size);
9498 }
9499 }
9500
9501 static __always_inline __must_check
9502-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9503+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9504 {
9505- int ret = 0;
9506+ unsigned ret = 0;
9507
9508 might_fault();
9509- if (!__builtin_constant_p(size))
9510+
9511+ if ((int)size < 0)
9512+ return size;
9513+
9514+#ifdef CONFIG_PAX_MEMORY_UDEREF
9515+ if (!__access_ok(VERIFY_READ, src, size))
9516+ return size;
9517+ if (!__access_ok(VERIFY_WRITE, dst, size))
9518+ return size;
9519+#endif
9520+
9521+ if (!__builtin_constant_p(size)) {
9522+
9523+#ifdef CONFIG_PAX_MEMORY_UDEREF
9524+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9525+ src += PAX_USER_SHADOW_BASE;
9526+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9527+ dst += PAX_USER_SHADOW_BASE;
9528+#endif
9529+
9530 return copy_user_generic((__force void *)dst,
9531- (__force void *)src, size);
9532+ (__force const void *)src, size);
9533+ }
9534 switch (size) {
9535 case 1: {
9536 u8 tmp;
9537- __get_user_asm(tmp, (u8 __user *)src,
9538+ __get_user_asm(tmp, (const u8 __user *)src,
9539 ret, "b", "b", "=q", 1);
9540 if (likely(!ret))
9541 __put_user_asm(tmp, (u8 __user *)dst,
9542@@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9543 }
9544 case 2: {
9545 u16 tmp;
9546- __get_user_asm(tmp, (u16 __user *)src,
9547+ __get_user_asm(tmp, (const u16 __user *)src,
9548 ret, "w", "w", "=r", 2);
9549 if (likely(!ret))
9550 __put_user_asm(tmp, (u16 __user *)dst,
9551@@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9552
9553 case 4: {
9554 u32 tmp;
9555- __get_user_asm(tmp, (u32 __user *)src,
9556+ __get_user_asm(tmp, (const u32 __user *)src,
9557 ret, "l", "k", "=r", 4);
9558 if (likely(!ret))
9559 __put_user_asm(tmp, (u32 __user *)dst,
9560@@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9561 }
9562 case 8: {
9563 u64 tmp;
9564- __get_user_asm(tmp, (u64 __user *)src,
9565+ __get_user_asm(tmp, (const u64 __user *)src,
9566 ret, "q", "", "=r", 8);
9567 if (likely(!ret))
9568 __put_user_asm(tmp, (u64 __user *)dst,
9569@@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9570 return ret;
9571 }
9572 default:
9573+
9574+#ifdef CONFIG_PAX_MEMORY_UDEREF
9575+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9576+ src += PAX_USER_SHADOW_BASE;
9577+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9578+ dst += PAX_USER_SHADOW_BASE;
9579+#endif
9580+
9581 return copy_user_generic((__force void *)dst,
9582- (__force void *)src, size);
9583+ (__force const void *)src, size);
9584 }
9585 }
9586
9587@@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9588 static __must_check __always_inline int
9589 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9590 {
9591+ pax_track_stack();
9592+
9593+ if ((int)size < 0)
9594+ return size;
9595+
9596+#ifdef CONFIG_PAX_MEMORY_UDEREF
9597+ if (!__access_ok(VERIFY_READ, src, size))
9598+ return size;
9599+
9600+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9601+ src += PAX_USER_SHADOW_BASE;
9602+#endif
9603+
9604 return copy_user_generic(dst, (__force const void *)src, size);
9605 }
9606
9607-static __must_check __always_inline int
9608+static __must_check __always_inline unsigned long
9609 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9610 {
9611+ if ((int)size < 0)
9612+ return size;
9613+
9614+#ifdef CONFIG_PAX_MEMORY_UDEREF
9615+ if (!__access_ok(VERIFY_WRITE, dst, size))
9616+ return size;
9617+
9618+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9619+ dst += PAX_USER_SHADOW_BASE;
9620+#endif
9621+
9622 return copy_user_generic((__force void *)dst, src, size);
9623 }
9624
9625-extern long __copy_user_nocache(void *dst, const void __user *src,
9626+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9627 unsigned size, int zerorest);
9628
9629-static inline int
9630-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9631+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9632 {
9633 might_sleep();
9634+
9635+ if ((int)size < 0)
9636+ return size;
9637+
9638+#ifdef CONFIG_PAX_MEMORY_UDEREF
9639+ if (!__access_ok(VERIFY_READ, src, size))
9640+ return size;
9641+#endif
9642+
9643 return __copy_user_nocache(dst, src, size, 1);
9644 }
9645
9646-static inline int
9647-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9648+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9649 unsigned size)
9650 {
9651+ if ((int)size < 0)
9652+ return size;
9653+
9654+#ifdef CONFIG_PAX_MEMORY_UDEREF
9655+ if (!__access_ok(VERIFY_READ, src, size))
9656+ return size;
9657+#endif
9658+
9659 return __copy_user_nocache(dst, src, size, 0);
9660 }
9661
9662-unsigned long
9663+extern unsigned long
9664 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9665
9666 #endif /* _ASM_X86_UACCESS_64_H */
9667diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9668--- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9669+++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9670@@ -8,12 +8,15 @@
9671 #include <linux/thread_info.h>
9672 #include <linux/prefetch.h>
9673 #include <linux/string.h>
9674+#include <linux/sched.h>
9675 #include <asm/asm.h>
9676 #include <asm/page.h>
9677
9678 #define VERIFY_READ 0
9679 #define VERIFY_WRITE 1
9680
9681+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9682+
9683 /*
9684 * The fs value determines whether argument validity checking should be
9685 * performed or not. If get_fs() == USER_DS, checking is performed, with
9686@@ -29,7 +32,12 @@
9687
9688 #define get_ds() (KERNEL_DS)
9689 #define get_fs() (current_thread_info()->addr_limit)
9690+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9691+void __set_fs(mm_segment_t x);
9692+void set_fs(mm_segment_t x);
9693+#else
9694 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9695+#endif
9696
9697 #define segment_eq(a, b) ((a).seg == (b).seg)
9698
9699@@ -77,7 +85,33 @@
9700 * checks that the pointer is in the user space range - after calling
9701 * this function, memory access functions may still return -EFAULT.
9702 */
9703-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9704+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9705+#define access_ok(type, addr, size) \
9706+({ \
9707+ long __size = size; \
9708+ unsigned long __addr = (unsigned long)addr; \
9709+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9710+ unsigned long __end_ao = __addr + __size - 1; \
9711+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9712+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9713+ while(__addr_ao <= __end_ao) { \
9714+ char __c_ao; \
9715+ __addr_ao += PAGE_SIZE; \
9716+ if (__size > PAGE_SIZE) \
9717+ cond_resched(); \
9718+ if (__get_user(__c_ao, (char __user *)__addr)) \
9719+ break; \
9720+ if (type != VERIFY_WRITE) { \
9721+ __addr = __addr_ao; \
9722+ continue; \
9723+ } \
9724+ if (__put_user(__c_ao, (char __user *)__addr)) \
9725+ break; \
9726+ __addr = __addr_ao; \
9727+ } \
9728+ } \
9729+ __ret_ao; \
9730+})
9731
9732 /*
9733 * The exception table consists of pairs of addresses: the first is the
9734@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9735 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9736 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9737
9738-
9739+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9740+#define __copyuser_seg "gs;"
9741+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9742+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9743+#else
9744+#define __copyuser_seg
9745+#define __COPYUSER_SET_ES
9746+#define __COPYUSER_RESTORE_ES
9747+#endif
9748
9749 #ifdef CONFIG_X86_32
9750 #define __put_user_asm_u64(x, addr, err, errret) \
9751- asm volatile("1: movl %%eax,0(%2)\n" \
9752- "2: movl %%edx,4(%2)\n" \
9753+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9754+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9755 "3:\n" \
9756 ".section .fixup,\"ax\"\n" \
9757 "4: movl %3,%0\n" \
9758@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9759 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9760
9761 #define __put_user_asm_ex_u64(x, addr) \
9762- asm volatile("1: movl %%eax,0(%1)\n" \
9763- "2: movl %%edx,4(%1)\n" \
9764+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9765+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9766 "3:\n" \
9767 _ASM_EXTABLE(1b, 2b - 1b) \
9768 _ASM_EXTABLE(2b, 3b - 2b) \
9769@@ -374,7 +416,7 @@ do { \
9770 } while (0)
9771
9772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9773- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9774+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9775 "2:\n" \
9776 ".section .fixup,\"ax\"\n" \
9777 "3: mov %3,%0\n" \
9778@@ -382,7 +424,7 @@ do { \
9779 " jmp 2b\n" \
9780 ".previous\n" \
9781 _ASM_EXTABLE(1b, 3b) \
9782- : "=r" (err), ltype(x) \
9783+ : "=r" (err), ltype (x) \
9784 : "m" (__m(addr)), "i" (errret), "0" (err))
9785
9786 #define __get_user_size_ex(x, ptr, size) \
9787@@ -407,7 +449,7 @@ do { \
9788 } while (0)
9789
9790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9791- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9792+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9793 "2:\n" \
9794 _ASM_EXTABLE(1b, 2b - 1b) \
9795 : ltype(x) : "m" (__m(addr)))
9796@@ -424,13 +466,24 @@ do { \
9797 int __gu_err; \
9798 unsigned long __gu_val; \
9799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9800- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9801+ (x) = (__typeof__(*(ptr)))__gu_val; \
9802 __gu_err; \
9803 })
9804
9805 /* FIXME: this hack is definitely wrong -AK */
9806 struct __large_struct { unsigned long buf[100]; };
9807-#define __m(x) (*(struct __large_struct __user *)(x))
9808+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9809+#define ____m(x) \
9810+({ \
9811+ unsigned long ____x = (unsigned long)(x); \
9812+ if (____x < PAX_USER_SHADOW_BASE) \
9813+ ____x += PAX_USER_SHADOW_BASE; \
9814+ (void __user *)____x; \
9815+})
9816+#else
9817+#define ____m(x) (x)
9818+#endif
9819+#define __m(x) (*(struct __large_struct __user *)____m(x))
9820
9821 /*
9822 * Tell gcc we read from memory instead of writing: this is because
9823@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9824 * aliasing issues.
9825 */
9826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9827- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9828+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9829 "2:\n" \
9830 ".section .fixup,\"ax\"\n" \
9831 "3: mov %3,%0\n" \
9832@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9833 ".previous\n" \
9834 _ASM_EXTABLE(1b, 3b) \
9835 : "=r"(err) \
9836- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9837+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9838
9839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9840- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9841+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9842 "2:\n" \
9843 _ASM_EXTABLE(1b, 2b - 1b) \
9844 : : ltype(x), "m" (__m(addr)))
9845@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9846 * On error, the variable @x is set to zero.
9847 */
9848
9849+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9850+#define __get_user(x, ptr) get_user((x), (ptr))
9851+#else
9852 #define __get_user(x, ptr) \
9853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9854+#endif
9855
9856 /**
9857 * __put_user: - Write a simple value into user space, with less checking.
9858@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9859 * Returns zero on success, or -EFAULT on error.
9860 */
9861
9862+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9863+#define __put_user(x, ptr) put_user((x), (ptr))
9864+#else
9865 #define __put_user(x, ptr) \
9866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9867+#endif
9868
9869 #define __get_user_unaligned __get_user
9870 #define __put_user_unaligned __put_user
9871@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9872 #define get_user_ex(x, ptr) do { \
9873 unsigned long __gue_val; \
9874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9875- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9876+ (x) = (__typeof__(*(ptr)))__gue_val; \
9877 } while (0)
9878
9879 #ifdef CONFIG_X86_WP_WORKS_OK
9880@@ -567,6 +628,7 @@ extern struct movsl_mask {
9881
9882 #define ARCH_HAS_NOCACHE_UACCESS 1
9883
9884+#define ARCH_HAS_SORT_EXTABLE
9885 #ifdef CONFIG_X86_32
9886 # include "uaccess_32.h"
9887 #else
9888diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9889--- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9890+++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9891@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9892 int sysctl_enabled;
9893 struct timezone sys_tz;
9894 struct { /* extract of a clocksource struct */
9895+ char name[8];
9896 cycle_t (*vread)(void);
9897 cycle_t cycle_last;
9898 cycle_t mask;
9899diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9900--- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9901+++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9902@@ -15,9 +15,10 @@ enum vsyscall_num {
9903
9904 #ifdef __KERNEL__
9905 #include <linux/seqlock.h>
9906+#include <linux/getcpu.h>
9907+#include <linux/time.h>
9908
9909 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9910-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9911
9912 /* Definitions for CONFIG_GENERIC_TIME definitions */
9913 #define __section_vsyscall_gtod_data __attribute__ \
9914@@ -31,7 +32,6 @@ enum vsyscall_num {
9915 #define VGETCPU_LSL 2
9916
9917 extern int __vgetcpu_mode;
9918-extern volatile unsigned long __jiffies;
9919
9920 /* kernel space (writeable) */
9921 extern int vgetcpu_mode;
9922@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9923
9924 extern void map_vsyscall(void);
9925
9926+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9927+extern time_t vtime(time_t *t);
9928+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9929 #endif /* __KERNEL__ */
9930
9931 #endif /* _ASM_X86_VSYSCALL_H */
9932diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9933--- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9934+++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9935@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9936 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9937 void (*find_smp_config)(void);
9938 void (*get_smp_config)(unsigned int early);
9939-};
9940+} __no_const;
9941
9942 /**
9943 * struct x86_init_resources - platform specific resource related ops
9944@@ -42,7 +42,7 @@ struct x86_init_resources {
9945 void (*probe_roms)(void);
9946 void (*reserve_resources)(void);
9947 char *(*memory_setup)(void);
9948-};
9949+} __no_const;
9950
9951 /**
9952 * struct x86_init_irqs - platform specific interrupt setup
9953@@ -55,7 +55,7 @@ struct x86_init_irqs {
9954 void (*pre_vector_init)(void);
9955 void (*intr_init)(void);
9956 void (*trap_init)(void);
9957-};
9958+} __no_const;
9959
9960 /**
9961 * struct x86_init_oem - oem platform specific customizing functions
9962@@ -65,7 +65,7 @@ struct x86_init_irqs {
9963 struct x86_init_oem {
9964 void (*arch_setup)(void);
9965 void (*banner)(void);
9966-};
9967+} __no_const;
9968
9969 /**
9970 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9971@@ -76,7 +76,7 @@ struct x86_init_oem {
9972 */
9973 struct x86_init_mapping {
9974 void (*pagetable_reserve)(u64 start, u64 end);
9975-};
9976+} __no_const;
9977
9978 /**
9979 * struct x86_init_paging - platform specific paging functions
9980@@ -86,7 +86,7 @@ struct x86_init_mapping {
9981 struct x86_init_paging {
9982 void (*pagetable_setup_start)(pgd_t *base);
9983 void (*pagetable_setup_done)(pgd_t *base);
9984-};
9985+} __no_const;
9986
9987 /**
9988 * struct x86_init_timers - platform specific timer setup
9989@@ -101,7 +101,7 @@ struct x86_init_timers {
9990 void (*tsc_pre_init)(void);
9991 void (*timer_init)(void);
9992 void (*wallclock_init)(void);
9993-};
9994+} __no_const;
9995
9996 /**
9997 * struct x86_init_iommu - platform specific iommu setup
9998@@ -109,7 +109,7 @@ struct x86_init_timers {
9999 */
10000 struct x86_init_iommu {
10001 int (*iommu_init)(void);
10002-};
10003+} __no_const;
10004
10005 /**
10006 * struct x86_init_pci - platform specific pci init functions
10007@@ -123,7 +123,7 @@ struct x86_init_pci {
10008 int (*init)(void);
10009 void (*init_irq)(void);
10010 void (*fixup_irqs)(void);
10011-};
10012+} __no_const;
10013
10014 /**
10015 * struct x86_init_ops - functions for platform specific setup
10016@@ -139,7 +139,7 @@ struct x86_init_ops {
10017 struct x86_init_timers timers;
10018 struct x86_init_iommu iommu;
10019 struct x86_init_pci pci;
10020-};
10021+} __no_const;
10022
10023 /**
10024 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10025@@ -147,7 +147,7 @@ struct x86_init_ops {
10026 */
10027 struct x86_cpuinit_ops {
10028 void (*setup_percpu_clockev)(void);
10029-};
10030+} __no_const;
10031
10032 /**
10033 * struct x86_platform_ops - platform specific runtime functions
10034@@ -166,7 +166,7 @@ struct x86_platform_ops {
10035 bool (*is_untracked_pat_range)(u64 start, u64 end);
10036 void (*nmi_init)(void);
10037 int (*i8042_detect)(void);
10038-};
10039+} __no_const;
10040
10041 struct pci_dev;
10042
10043@@ -174,7 +174,7 @@ struct x86_msi_ops {
10044 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10045 void (*teardown_msi_irq)(unsigned int irq);
10046 void (*teardown_msi_irqs)(struct pci_dev *dev);
10047-};
10048+} __no_const;
10049
10050 extern struct x86_init_ops x86_init;
10051 extern struct x86_cpuinit_ops x86_cpuinit;
10052diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10053--- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10054+++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10055@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10056 {
10057 int err;
10058
10059+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10060+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10061+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10062+#endif
10063+
10064 /*
10065 * Clear the xsave header first, so that reserved fields are
10066 * initialized to zero.
10067@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10068 u32 lmask = mask;
10069 u32 hmask = mask >> 32;
10070
10071+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10072+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10073+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10074+#endif
10075+
10076 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10077 "2:\n"
10078 ".section .fixup,\"ax\"\n"
10079diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10080--- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10081+++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10082@@ -224,7 +224,7 @@ config X86_HT
10083
10084 config X86_32_LAZY_GS
10085 def_bool y
10086- depends on X86_32 && !CC_STACKPROTECTOR
10087+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10088
10089 config ARCH_HWEIGHT_CFLAGS
10090 string
10091@@ -1022,7 +1022,7 @@ choice
10092
10093 config NOHIGHMEM
10094 bool "off"
10095- depends on !X86_NUMAQ
10096+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10097 ---help---
10098 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10099 However, the address space of 32-bit x86 processors is only 4
10100@@ -1059,7 +1059,7 @@ config NOHIGHMEM
10101
10102 config HIGHMEM4G
10103 bool "4GB"
10104- depends on !X86_NUMAQ
10105+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10106 ---help---
10107 Select this if you have a 32-bit processor and between 1 and 4
10108 gigabytes of physical RAM.
10109@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10110 hex
10111 default 0xB0000000 if VMSPLIT_3G_OPT
10112 default 0x80000000 if VMSPLIT_2G
10113- default 0x78000000 if VMSPLIT_2G_OPT
10114+ default 0x70000000 if VMSPLIT_2G_OPT
10115 default 0x40000000 if VMSPLIT_1G
10116 default 0xC0000000
10117 depends on X86_32
10118@@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10119
10120 config EFI
10121 bool "EFI runtime service support"
10122- depends on ACPI
10123+ depends on ACPI && !PAX_KERNEXEC
10124 ---help---
10125 This enables the kernel to use EFI runtime services that are
10126 available (such as the EFI variable services).
10127@@ -1487,6 +1487,7 @@ config SECCOMP
10128
10129 config CC_STACKPROTECTOR
10130 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10131+ depends on X86_64 || !PAX_MEMORY_UDEREF
10132 ---help---
10133 This option turns on the -fstack-protector GCC feature. This
10134 feature puts, at the beginning of functions, a canary value on
10135@@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10136 config PHYSICAL_START
10137 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10138 default "0x1000000"
10139+ range 0x400000 0x40000000
10140 ---help---
10141 This gives the physical address where the kernel is loaded.
10142
10143@@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10144 config PHYSICAL_ALIGN
10145 hex "Alignment value to which kernel should be aligned" if X86_32
10146 default "0x1000000"
10147+ range 0x400000 0x1000000 if PAX_KERNEXEC
10148 range 0x2000 0x1000000
10149 ---help---
10150 This value puts the alignment restrictions on physical address
10151@@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10152 Say N if you want to disable CPU hotplug.
10153
10154 config COMPAT_VDSO
10155- def_bool y
10156+ def_bool n
10157 prompt "Compat VDSO support"
10158 depends on X86_32 || IA32_EMULATION
10159+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10160 ---help---
10161 Map the 32-bit VDSO to the predictable old-style address too.
10162
10163diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10164--- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10165+++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10166@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10167
10168 config X86_F00F_BUG
10169 def_bool y
10170- depends on M586MMX || M586TSC || M586 || M486 || M386
10171+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10172
10173 config X86_INVD_BUG
10174 def_bool y
10175@@ -358,7 +358,7 @@ config X86_POPAD_OK
10176
10177 config X86_ALIGNMENT_16
10178 def_bool y
10179- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10180+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10181
10182 config X86_INTEL_USERCOPY
10183 def_bool y
10184@@ -404,7 +404,7 @@ config X86_CMPXCHG64
10185 # generates cmov.
10186 config X86_CMOV
10187 def_bool y
10188- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10189+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10190
10191 config X86_MINIMUM_CPU_FAMILY
10192 int
10193diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10194--- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10195+++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10196@@ -101,7 +101,7 @@ config X86_PTDUMP
10197 config DEBUG_RODATA
10198 bool "Write protect kernel read-only data structures"
10199 default y
10200- depends on DEBUG_KERNEL
10201+ depends on DEBUG_KERNEL && BROKEN
10202 ---help---
10203 Mark the kernel read-only data as write-protected in the pagetables,
10204 in order to catch accidental (and incorrect) writes to such const
10205@@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10206
10207 config DEBUG_SET_MODULE_RONX
10208 bool "Set loadable kernel module data as NX and text as RO"
10209- depends on MODULES
10210+ depends on MODULES && BROKEN
10211 ---help---
10212 This option helps catch unintended modifications to loadable
10213 kernel module's text and read-only data. It also prevents execution
10214diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10215--- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10216+++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10217@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10218 $(call cc-option, -fno-stack-protector) \
10219 $(call cc-option, -mpreferred-stack-boundary=2)
10220 KBUILD_CFLAGS += $(call cc-option, -m32)
10221+ifdef CONSTIFY_PLUGIN
10222+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10223+endif
10224 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10225 GCOV_PROFILE := n
10226
10227diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10228--- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10229+++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10230@@ -108,6 +108,9 @@ wakeup_code:
10231 /* Do any other stuff... */
10232
10233 #ifndef CONFIG_64BIT
10234+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10235+ call verify_cpu
10236+
10237 /* This could also be done in C code... */
10238 movl pmode_cr3, %eax
10239 movl %eax, %cr3
10240@@ -131,6 +134,7 @@ wakeup_code:
10241 movl pmode_cr0, %eax
10242 movl %eax, %cr0
10243 jmp pmode_return
10244+# include "../../verify_cpu.S"
10245 #else
10246 pushw $0
10247 pushw trampoline_segment
10248diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10249--- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10250+++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10251@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10252 header->trampoline_segment = trampoline_address() >> 4;
10253 #ifdef CONFIG_SMP
10254 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10255+
10256+ pax_open_kernel();
10257 early_gdt_descr.address =
10258 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10259+ pax_close_kernel();
10260+
10261 initial_gs = per_cpu_offset(smp_processor_id());
10262 #endif
10263 initial_code = (unsigned long)wakeup_long64;
10264diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10265--- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10266+++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10267@@ -30,13 +30,11 @@ wakeup_pmode_return:
10268 # and restore the stack ... but you need gdt for this to work
10269 movl saved_context_esp, %esp
10270
10271- movl %cs:saved_magic, %eax
10272- cmpl $0x12345678, %eax
10273+ cmpl $0x12345678, saved_magic
10274 jne bogus_magic
10275
10276 # jump to place where we left off
10277- movl saved_eip, %eax
10278- jmp *%eax
10279+ jmp *(saved_eip)
10280
10281 bogus_magic:
10282 jmp bogus_magic
10283diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10284--- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10285+++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10286@@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10287 if (!*poff || ptr < text || ptr >= text_end)
10288 continue;
10289 /* turn DS segment override prefix into lock prefix */
10290- if (*ptr == 0x3e)
10291+ if (*ktla_ktva(ptr) == 0x3e)
10292 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10293 };
10294 mutex_unlock(&text_mutex);
10295@@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10296 if (!*poff || ptr < text || ptr >= text_end)
10297 continue;
10298 /* turn lock prefix into DS segment override prefix */
10299- if (*ptr == 0xf0)
10300+ if (*ktla_ktva(ptr) == 0xf0)
10301 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10302 };
10303 mutex_unlock(&text_mutex);
10304@@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10305
10306 BUG_ON(p->len > MAX_PATCH_LEN);
10307 /* prep the buffer with the original instructions */
10308- memcpy(insnbuf, p->instr, p->len);
10309+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10310 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10311 (unsigned long)p->instr, p->len);
10312
10313@@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10314 if (smp_alt_once)
10315 free_init_pages("SMP alternatives",
10316 (unsigned long)__smp_locks,
10317- (unsigned long)__smp_locks_end);
10318+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10319
10320 restart_nmi();
10321 }
10322@@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10323 * instructions. And on the local CPU you need to be protected again NMI or MCE
10324 * handlers seeing an inconsistent instruction while you patch.
10325 */
10326-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10327+void *__kprobes text_poke_early(void *addr, const void *opcode,
10328 size_t len)
10329 {
10330 unsigned long flags;
10331 local_irq_save(flags);
10332- memcpy(addr, opcode, len);
10333+
10334+ pax_open_kernel();
10335+ memcpy(ktla_ktva(addr), opcode, len);
10336 sync_core();
10337+ pax_close_kernel();
10338+
10339 local_irq_restore(flags);
10340 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10341 that causes hangs on some VIA CPUs. */
10342@@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10343 */
10344 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10345 {
10346- unsigned long flags;
10347- char *vaddr;
10348+ unsigned char *vaddr = ktla_ktva(addr);
10349 struct page *pages[2];
10350- int i;
10351+ size_t i;
10352
10353 if (!core_kernel_text((unsigned long)addr)) {
10354- pages[0] = vmalloc_to_page(addr);
10355- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10356+ pages[0] = vmalloc_to_page(vaddr);
10357+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10358 } else {
10359- pages[0] = virt_to_page(addr);
10360+ pages[0] = virt_to_page(vaddr);
10361 WARN_ON(!PageReserved(pages[0]));
10362- pages[1] = virt_to_page(addr + PAGE_SIZE);
10363+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10364 }
10365 BUG_ON(!pages[0]);
10366- local_irq_save(flags);
10367- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10368- if (pages[1])
10369- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10370- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10371- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10372- clear_fixmap(FIX_TEXT_POKE0);
10373- if (pages[1])
10374- clear_fixmap(FIX_TEXT_POKE1);
10375- local_flush_tlb();
10376- sync_core();
10377- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10378- that causes hangs on some VIA CPUs. */
10379+ text_poke_early(addr, opcode, len);
10380 for (i = 0; i < len; i++)
10381- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10382- local_irq_restore(flags);
10383+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10384 return addr;
10385 }
10386
10387@@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10388 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10389
10390 #ifdef CONFIG_X86_64
10391-unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10392+unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10393 #else
10394-unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10395+unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10396 #endif
10397
10398 void __init arch_init_ideal_nop5(void)
10399diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10400--- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10401+++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-05 19:44:33.000000000 -0400
10402@@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10403 apic_write(APIC_ESR, 0);
10404 v1 = apic_read(APIC_ESR);
10405 ack_APIC_irq();
10406- atomic_inc(&irq_err_count);
10407+ atomic_inc_unchecked(&irq_err_count);
10408
10409 /*
10410 * Here is what the APIC error bits mean:
10411@@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10412 u16 *bios_cpu_apicid;
10413 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10414
10415+ pax_track_stack();
10416+
10417 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10418 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10419
10420diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10421--- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10422+++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10423@@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10424 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10425 GFP_ATOMIC);
10426 if (!ioapic_entries)
10427- return 0;
10428+ return NULL;
10429
10430 for (apic = 0; apic < nr_ioapics; apic++) {
10431 ioapic_entries[apic] =
10432@@ -640,7 +640,7 @@ nomem:
10433 kfree(ioapic_entries[apic]);
10434 kfree(ioapic_entries);
10435
10436- return 0;
10437+ return NULL;
10438 }
10439
10440 /*
10441@@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10442 }
10443 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10444
10445-void lock_vector_lock(void)
10446+void lock_vector_lock(void) __acquires(vector_lock)
10447 {
10448 /* Used to the online set of cpus does not change
10449 * during assign_irq_vector.
10450@@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10451 raw_spin_lock(&vector_lock);
10452 }
10453
10454-void unlock_vector_lock(void)
10455+void unlock_vector_lock(void) __releases(vector_lock)
10456 {
10457 raw_spin_unlock(&vector_lock);
10458 }
10459@@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10460 ack_APIC_irq();
10461 }
10462
10463-atomic_t irq_mis_count;
10464+atomic_unchecked_t irq_mis_count;
10465
10466 /*
10467 * IO-APIC versions below 0x20 don't support EOI register.
10468@@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10469 * at the cpu.
10470 */
10471 if (!(v & (1 << (i & 0x1f)))) {
10472- atomic_inc(&irq_mis_count);
10473+ atomic_inc_unchecked(&irq_mis_count);
10474
10475 eoi_ioapic_irq(irq, cfg);
10476 }
10477diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10478--- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10479+++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10480@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10481 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10482 * even though they are called in protected mode.
10483 */
10484-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10485+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10486 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10487
10488 static const char driver_version[] = "1.16ac"; /* no spaces */
10489@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10490 BUG_ON(cpu != 0);
10491 gdt = get_cpu_gdt_table(cpu);
10492 save_desc_40 = gdt[0x40 / 8];
10493+
10494+ pax_open_kernel();
10495 gdt[0x40 / 8] = bad_bios_desc;
10496+ pax_close_kernel();
10497
10498 apm_irq_save(flags);
10499 APM_DO_SAVE_SEGS;
10500@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10501 &call->esi);
10502 APM_DO_RESTORE_SEGS;
10503 apm_irq_restore(flags);
10504+
10505+ pax_open_kernel();
10506 gdt[0x40 / 8] = save_desc_40;
10507+ pax_close_kernel();
10508+
10509 put_cpu();
10510
10511 return call->eax & 0xff;
10512@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10513 BUG_ON(cpu != 0);
10514 gdt = get_cpu_gdt_table(cpu);
10515 save_desc_40 = gdt[0x40 / 8];
10516+
10517+ pax_open_kernel();
10518 gdt[0x40 / 8] = bad_bios_desc;
10519+ pax_close_kernel();
10520
10521 apm_irq_save(flags);
10522 APM_DO_SAVE_SEGS;
10523@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10524 &call->eax);
10525 APM_DO_RESTORE_SEGS;
10526 apm_irq_restore(flags);
10527+
10528+ pax_open_kernel();
10529 gdt[0x40 / 8] = save_desc_40;
10530+ pax_close_kernel();
10531+
10532 put_cpu();
10533 return error;
10534 }
10535@@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10536 * code to that CPU.
10537 */
10538 gdt = get_cpu_gdt_table(0);
10539+
10540+ pax_open_kernel();
10541 set_desc_base(&gdt[APM_CS >> 3],
10542 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10543 set_desc_base(&gdt[APM_CS_16 >> 3],
10544 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10545 set_desc_base(&gdt[APM_DS >> 3],
10546 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10547+ pax_close_kernel();
10548
10549 proc_create("apm", 0, NULL, &apm_file_ops);
10550
10551diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10552--- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10553+++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10554@@ -69,6 +69,7 @@ int main(void)
10555 BLANK();
10556 #undef ENTRY
10557
10558+ DEFINE(TSS_size, sizeof(struct tss_struct));
10559 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10560 BLANK();
10561
10562diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10563--- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10564+++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10565@@ -33,6 +33,8 @@ void common(void) {
10566 OFFSET(TI_status, thread_info, status);
10567 OFFSET(TI_addr_limit, thread_info, addr_limit);
10568 OFFSET(TI_preempt_count, thread_info, preempt_count);
10569+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10570+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10571
10572 BLANK();
10573 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10574@@ -53,8 +55,26 @@ void common(void) {
10575 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10576 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10577 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10578+
10579+#ifdef CONFIG_PAX_KERNEXEC
10580+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10581+#endif
10582+
10583+#ifdef CONFIG_PAX_MEMORY_UDEREF
10584+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10585+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10586+#ifdef CONFIG_X86_64
10587+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10588+#endif
10589 #endif
10590
10591+#endif
10592+
10593+ BLANK();
10594+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10595+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10596+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10597+
10598 #ifdef CONFIG_XEN
10599 BLANK();
10600 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10601diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10602--- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10603+++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10604@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10605 unsigned int size)
10606 {
10607 /* AMD errata T13 (order #21922) */
10608- if ((c->x86 == 6)) {
10609+ if (c->x86 == 6) {
10610 /* Duron Rev A0 */
10611 if (c->x86_model == 3 && c->x86_mask == 0)
10612 size = 64;
10613diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10614--- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10615+++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10616@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10617
10618 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10619
10620-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10621-#ifdef CONFIG_X86_64
10622- /*
10623- * We need valid kernel segments for data and code in long mode too
10624- * IRET will check the segment types kkeil 2000/10/28
10625- * Also sysret mandates a special GDT layout
10626- *
10627- * TLS descriptors are currently at a different place compared to i386.
10628- * Hopefully nobody expects them at a fixed place (Wine?)
10629- */
10630- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10631- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10632- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10633- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10634- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10635- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10636-#else
10637- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10638- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10639- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10640- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10641- /*
10642- * Segments used for calling PnP BIOS have byte granularity.
10643- * They code segments and data segments have fixed 64k limits,
10644- * the transfer segment sizes are set at run time.
10645- */
10646- /* 32-bit code */
10647- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10648- /* 16-bit code */
10649- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10650- /* 16-bit data */
10651- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10652- /* 16-bit data */
10653- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10654- /* 16-bit data */
10655- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10656- /*
10657- * The APM segments have byte granularity and their bases
10658- * are set at run time. All have 64k limits.
10659- */
10660- /* 32-bit code */
10661- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10662- /* 16-bit code */
10663- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10664- /* data */
10665- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10666-
10667- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10668- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10669- GDT_STACK_CANARY_INIT
10670-#endif
10671-} };
10672-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10673-
10674 static int __init x86_xsave_setup(char *s)
10675 {
10676 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10677@@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10678 {
10679 struct desc_ptr gdt_descr;
10680
10681- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10682+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10683 gdt_descr.size = GDT_SIZE - 1;
10684 load_gdt(&gdt_descr);
10685 /* Reload the per-cpu base */
10686@@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10687 /* Filter out anything that depends on CPUID levels we don't have */
10688 filter_cpuid_features(c, true);
10689
10690+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10691+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10692+#endif
10693+
10694 /* If the model name is still unset, do table lookup. */
10695 if (!c->x86_model_id[0]) {
10696 const char *p;
10697@@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10698 }
10699 __setup("clearcpuid=", setup_disablecpuid);
10700
10701+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10702+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10703+
10704 #ifdef CONFIG_X86_64
10705 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10706
10707@@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10708 EXPORT_PER_CPU_SYMBOL(current_task);
10709
10710 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10711- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10712+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10713 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10714
10715 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10716@@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10717 {
10718 memset(regs, 0, sizeof(struct pt_regs));
10719 regs->fs = __KERNEL_PERCPU;
10720- regs->gs = __KERNEL_STACK_CANARY;
10721+ savesegment(gs, regs->gs);
10722
10723 return regs;
10724 }
10725@@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10726 int i;
10727
10728 cpu = stack_smp_processor_id();
10729- t = &per_cpu(init_tss, cpu);
10730+ t = init_tss + cpu;
10731 oist = &per_cpu(orig_ist, cpu);
10732
10733 #ifdef CONFIG_NUMA
10734@@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10735 switch_to_new_gdt(cpu);
10736 loadsegment(fs, 0);
10737
10738- load_idt((const struct desc_ptr *)&idt_descr);
10739+ load_idt(&idt_descr);
10740
10741 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10742 syscall_init();
10743@@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10744 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10745 barrier();
10746
10747- x86_configure_nx();
10748 if (cpu != 0)
10749 enable_x2apic();
10750
10751@@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10752 {
10753 int cpu = smp_processor_id();
10754 struct task_struct *curr = current;
10755- struct tss_struct *t = &per_cpu(init_tss, cpu);
10756+ struct tss_struct *t = init_tss + cpu;
10757 struct thread_struct *thread = &curr->thread;
10758
10759 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10760diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10761--- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10762+++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10763@@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10764 * Update the IDT descriptor and reload the IDT so that
10765 * it uses the read-only mapped virtual address.
10766 */
10767- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10768+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10769 load_idt(&idt_descr);
10770 }
10771 #endif
10772diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10773--- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10774+++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10775@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10776 CFLAGS_REMOVE_perf_event.o = -pg
10777 endif
10778
10779-# Make sure load_percpu_segment has no stackprotector
10780-nostackp := $(call cc-option, -fno-stack-protector)
10781-CFLAGS_common.o := $(nostackp)
10782-
10783 obj-y := intel_cacheinfo.o scattered.o topology.o
10784 obj-y += proc.o capflags.o powerflags.o common.o
10785 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10786diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10787--- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10788+++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10789@@ -46,6 +46,7 @@
10790 #include <asm/ipi.h>
10791 #include <asm/mce.h>
10792 #include <asm/msr.h>
10793+#include <asm/local.h>
10794
10795 #include "mce-internal.h"
10796
10797@@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10798 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10799 m->cs, m->ip);
10800
10801- if (m->cs == __KERNEL_CS)
10802+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10803 print_symbol("{%s}", m->ip);
10804 pr_cont("\n");
10805 }
10806@@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10807
10808 #define PANIC_TIMEOUT 5 /* 5 seconds */
10809
10810-static atomic_t mce_paniced;
10811+static atomic_unchecked_t mce_paniced;
10812
10813 static int fake_panic;
10814-static atomic_t mce_fake_paniced;
10815+static atomic_unchecked_t mce_fake_paniced;
10816
10817 /* Panic in progress. Enable interrupts and wait for final IPI */
10818 static void wait_for_panic(void)
10819@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10820 /*
10821 * Make sure only one CPU runs in machine check panic
10822 */
10823- if (atomic_inc_return(&mce_paniced) > 1)
10824+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10825 wait_for_panic();
10826 barrier();
10827
10828@@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10829 console_verbose();
10830 } else {
10831 /* Don't log too much for fake panic */
10832- if (atomic_inc_return(&mce_fake_paniced) > 1)
10833+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10834 return;
10835 }
10836 /* First print corrected ones that are still unlogged */
10837@@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10838 * might have been modified by someone else.
10839 */
10840 rmb();
10841- if (atomic_read(&mce_paniced))
10842+ if (atomic_read_unchecked(&mce_paniced))
10843 wait_for_panic();
10844 if (!monarch_timeout)
10845 goto out;
10846@@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10847 */
10848
10849 static DEFINE_SPINLOCK(mce_state_lock);
10850-static int open_count; /* #times opened */
10851+static local_t open_count; /* #times opened */
10852 static int open_exclu; /* already open exclusive? */
10853
10854 static int mce_open(struct inode *inode, struct file *file)
10855 {
10856 spin_lock(&mce_state_lock);
10857
10858- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10859+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10860 spin_unlock(&mce_state_lock);
10861
10862 return -EBUSY;
10863@@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10864
10865 if (file->f_flags & O_EXCL)
10866 open_exclu = 1;
10867- open_count++;
10868+ local_inc(&open_count);
10869
10870 spin_unlock(&mce_state_lock);
10871
10872@@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10873 {
10874 spin_lock(&mce_state_lock);
10875
10876- open_count--;
10877+ local_dec(&open_count);
10878 open_exclu = 0;
10879
10880 spin_unlock(&mce_state_lock);
10881@@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10882 static void mce_reset(void)
10883 {
10884 cpu_missing = 0;
10885- atomic_set(&mce_fake_paniced, 0);
10886+ atomic_set_unchecked(&mce_fake_paniced, 0);
10887 atomic_set(&mce_executing, 0);
10888 atomic_set(&mce_callin, 0);
10889 atomic_set(&global_nwo, 0);
10890diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10891--- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10892+++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10893@@ -215,7 +215,9 @@ static int inject_init(void)
10894 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10895 return -ENOMEM;
10896 printk(KERN_INFO "Machine check injector initialized\n");
10897- mce_chrdev_ops.write = mce_write;
10898+ pax_open_kernel();
10899+ *(void **)&mce_chrdev_ops.write = mce_write;
10900+ pax_close_kernel();
10901 register_die_notifier(&mce_raise_nb);
10902 return 0;
10903 }
10904diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10905--- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10906+++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10907@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10908 u64 size_or_mask, size_and_mask;
10909 static bool mtrr_aps_delayed_init;
10910
10911-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10912+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10913
10914 const struct mtrr_ops *mtrr_if;
10915
10916diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10917--- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10918+++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10919@@ -12,8 +12,8 @@
10920 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10921
10922 struct mtrr_ops {
10923- u32 vendor;
10924- u32 use_intel_if;
10925+ const u32 vendor;
10926+ const u32 use_intel_if;
10927 void (*set)(unsigned int reg, unsigned long base,
10928 unsigned long size, mtrr_type type);
10929 void (*set_all)(void);
10930diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10931--- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10932+++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10933@@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10934 int i, j, w, wmax, num = 0;
10935 struct hw_perf_event *hwc;
10936
10937+ pax_track_stack();
10938+
10939 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10940
10941 for (i = 0; i < n; i++) {
10942@@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10943 break;
10944
10945 perf_callchain_store(entry, frame.return_address);
10946- fp = frame.next_frame;
10947+ fp = (__force const void __user *)frame.next_frame;
10948 }
10949 }
10950
10951diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10952--- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10953+++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10954@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10955 regs = args->regs;
10956
10957 #ifdef CONFIG_X86_32
10958- if (!user_mode_vm(regs)) {
10959+ if (!user_mode(regs)) {
10960 crash_fixup_ss_esp(&fixed_regs, regs);
10961 regs = &fixed_regs;
10962 }
10963diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
10964--- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
10965+++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
10966@@ -11,7 +11,7 @@
10967
10968 #define DOUBLEFAULT_STACKSIZE (1024)
10969 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10970-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10971+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10972
10973 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10974
10975@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10976 unsigned long gdt, tss;
10977
10978 store_gdt(&gdt_desc);
10979- gdt = gdt_desc.address;
10980+ gdt = (unsigned long)gdt_desc.address;
10981
10982 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10983
10984@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10985 /* 0x2 bit is always set */
10986 .flags = X86_EFLAGS_SF | 0x2,
10987 .sp = STACK_START,
10988- .es = __USER_DS,
10989+ .es = __KERNEL_DS,
10990 .cs = __KERNEL_CS,
10991 .ss = __KERNEL_DS,
10992- .ds = __USER_DS,
10993+ .ds = __KERNEL_DS,
10994 .fs = __KERNEL_PERCPU,
10995
10996 .__cr3 = __pa_nodebug(swapper_pg_dir),
10997diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
10998--- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
10999+++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11000@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11001 bp = stack_frame(task, regs);
11002
11003 for (;;) {
11004- struct thread_info *context;
11005+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11006
11007- context = (struct thread_info *)
11008- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11009- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11010+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11011
11012- stack = (unsigned long *)context->previous_esp;
11013- if (!stack)
11014+ if (stack_start == task_stack_page(task))
11015 break;
11016+ stack = *(unsigned long **)stack_start;
11017 if (ops->stack(data, "IRQ") < 0)
11018 break;
11019 touch_nmi_watchdog();
11020@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11021 * When in-kernel, we also print out the stack and code at the
11022 * time of the fault..
11023 */
11024- if (!user_mode_vm(regs)) {
11025+ if (!user_mode(regs)) {
11026 unsigned int code_prologue = code_bytes * 43 / 64;
11027 unsigned int code_len = code_bytes;
11028 unsigned char c;
11029 u8 *ip;
11030+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11031
11032 printk(KERN_EMERG "Stack:\n");
11033 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11034
11035 printk(KERN_EMERG "Code: ");
11036
11037- ip = (u8 *)regs->ip - code_prologue;
11038+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11039 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11040 /* try starting at IP */
11041- ip = (u8 *)regs->ip;
11042+ ip = (u8 *)regs->ip + cs_base;
11043 code_len = code_len - code_prologue + 1;
11044 }
11045 for (i = 0; i < code_len; i++, ip++) {
11046@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11047 printk(" Bad EIP value.");
11048 break;
11049 }
11050- if (ip == (u8 *)regs->ip)
11051+ if (ip == (u8 *)regs->ip + cs_base)
11052 printk("<%02x> ", c);
11053 else
11054 printk("%02x ", c);
11055@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11056 {
11057 unsigned short ud2;
11058
11059+ ip = ktla_ktva(ip);
11060 if (ip < PAGE_OFFSET)
11061 return 0;
11062 if (probe_kernel_address((unsigned short *)ip, ud2))
11063diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11064--- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11065+++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11066@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11067 unsigned long *irq_stack_end =
11068 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11069 unsigned used = 0;
11070- struct thread_info *tinfo;
11071 int graph = 0;
11072 unsigned long dummy;
11073+ void *stack_start;
11074
11075 if (!task)
11076 task = current;
11077@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11078 * current stack address. If the stacks consist of nested
11079 * exceptions
11080 */
11081- tinfo = task_thread_info(task);
11082 for (;;) {
11083 char *id;
11084 unsigned long *estack_end;
11085+
11086 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11087 &used, &id);
11088
11089@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11090 if (ops->stack(data, id) < 0)
11091 break;
11092
11093- bp = ops->walk_stack(tinfo, stack, bp, ops,
11094+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11095 data, estack_end, &graph);
11096 ops->stack(data, "<EOE>");
11097 /*
11098@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11099 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11100 if (ops->stack(data, "IRQ") < 0)
11101 break;
11102- bp = ops->walk_stack(tinfo, stack, bp,
11103+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11104 ops, data, irq_stack_end, &graph);
11105 /*
11106 * We link to the next stack (which would be
11107@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11108 /*
11109 * This handles the process stack:
11110 */
11111- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11112+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11113+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11114 put_cpu();
11115 }
11116 EXPORT_SYMBOL(dump_trace);
11117diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11118--- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11119+++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11120@@ -2,6 +2,9 @@
11121 * Copyright (C) 1991, 1992 Linus Torvalds
11122 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11123 */
11124+#ifdef CONFIG_GRKERNSEC_HIDESYM
11125+#define __INCLUDED_BY_HIDESYM 1
11126+#endif
11127 #include <linux/kallsyms.h>
11128 #include <linux/kprobes.h>
11129 #include <linux/uaccess.h>
11130@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11131 static void
11132 print_ftrace_graph_addr(unsigned long addr, void *data,
11133 const struct stacktrace_ops *ops,
11134- struct thread_info *tinfo, int *graph)
11135+ struct task_struct *task, int *graph)
11136 {
11137- struct task_struct *task = tinfo->task;
11138 unsigned long ret_addr;
11139 int index = task->curr_ret_stack;
11140
11141@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11142 static inline void
11143 print_ftrace_graph_addr(unsigned long addr, void *data,
11144 const struct stacktrace_ops *ops,
11145- struct thread_info *tinfo, int *graph)
11146+ struct task_struct *task, int *graph)
11147 { }
11148 #endif
11149
11150@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11151 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11152 */
11153
11154-static inline int valid_stack_ptr(struct thread_info *tinfo,
11155- void *p, unsigned int size, void *end)
11156+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11157 {
11158- void *t = tinfo;
11159 if (end) {
11160 if (p < end && p >= (end-THREAD_SIZE))
11161 return 1;
11162@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11163 }
11164
11165 unsigned long
11166-print_context_stack(struct thread_info *tinfo,
11167+print_context_stack(struct task_struct *task, void *stack_start,
11168 unsigned long *stack, unsigned long bp,
11169 const struct stacktrace_ops *ops, void *data,
11170 unsigned long *end, int *graph)
11171 {
11172 struct stack_frame *frame = (struct stack_frame *)bp;
11173
11174- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11175+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11176 unsigned long addr;
11177
11178 addr = *stack;
11179@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11180 } else {
11181 ops->address(data, addr, 0);
11182 }
11183- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11184+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11185 }
11186 stack++;
11187 }
11188@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11189 EXPORT_SYMBOL_GPL(print_context_stack);
11190
11191 unsigned long
11192-print_context_stack_bp(struct thread_info *tinfo,
11193+print_context_stack_bp(struct task_struct *task, void *stack_start,
11194 unsigned long *stack, unsigned long bp,
11195 const struct stacktrace_ops *ops, void *data,
11196 unsigned long *end, int *graph)
11197@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11198 struct stack_frame *frame = (struct stack_frame *)bp;
11199 unsigned long *ret_addr = &frame->return_address;
11200
11201- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11202+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11203 unsigned long addr = *ret_addr;
11204
11205 if (!__kernel_text_address(addr))
11206@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11207 ops->address(data, addr, 1);
11208 frame = frame->next_frame;
11209 ret_addr = &frame->return_address;
11210- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11211+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11212 }
11213
11214 return (unsigned long)frame;
11215@@ -202,7 +202,7 @@ void dump_stack(void)
11216
11217 bp = stack_frame(current, NULL);
11218 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11219- current->pid, current->comm, print_tainted(),
11220+ task_pid_nr(current), current->comm, print_tainted(),
11221 init_utsname()->release,
11222 (int)strcspn(init_utsname()->version, " "),
11223 init_utsname()->version);
11224@@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11225 }
11226 EXPORT_SYMBOL_GPL(oops_begin);
11227
11228+extern void gr_handle_kernel_exploit(void);
11229+
11230 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11231 {
11232 if (regs && kexec_should_crash(current))
11233@@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11234 panic("Fatal exception in interrupt");
11235 if (panic_on_oops)
11236 panic("Fatal exception");
11237- do_exit(signr);
11238+
11239+ gr_handle_kernel_exploit();
11240+
11241+ do_group_exit(signr);
11242 }
11243
11244 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11245@@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11246
11247 show_registers(regs);
11248 #ifdef CONFIG_X86_32
11249- if (user_mode_vm(regs)) {
11250+ if (user_mode(regs)) {
11251 sp = regs->sp;
11252 ss = regs->ss & 0xffff;
11253 } else {
11254@@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11255 unsigned long flags = oops_begin();
11256 int sig = SIGSEGV;
11257
11258- if (!user_mode_vm(regs))
11259+ if (!user_mode(regs))
11260 report_bug(regs->ip, regs);
11261
11262 if (__die(str, regs, err))
11263diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11264--- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11265+++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11266@@ -7,6 +7,7 @@
11267 #include <linux/pci_regs.h>
11268 #include <linux/pci_ids.h>
11269 #include <linux/errno.h>
11270+#include <linux/sched.h>
11271 #include <asm/io.h>
11272 #include <asm/processor.h>
11273 #include <asm/fcntl.h>
11274@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11275 int n;
11276 va_list ap;
11277
11278+ pax_track_stack();
11279+
11280 va_start(ap, fmt);
11281 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11282 early_console->write(early_console, buf, n);
11283diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11284--- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11285+++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11286@@ -185,13 +185,146 @@
11287 /*CFI_REL_OFFSET gs, PT_GS*/
11288 .endm
11289 .macro SET_KERNEL_GS reg
11290+
11291+#ifdef CONFIG_CC_STACKPROTECTOR
11292 movl $(__KERNEL_STACK_CANARY), \reg
11293+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11294+ movl $(__USER_DS), \reg
11295+#else
11296+ xorl \reg, \reg
11297+#endif
11298+
11299 movl \reg, %gs
11300 .endm
11301
11302 #endif /* CONFIG_X86_32_LAZY_GS */
11303
11304-.macro SAVE_ALL
11305+.macro pax_enter_kernel
11306+#ifdef CONFIG_PAX_KERNEXEC
11307+ call pax_enter_kernel
11308+#endif
11309+.endm
11310+
11311+.macro pax_exit_kernel
11312+#ifdef CONFIG_PAX_KERNEXEC
11313+ call pax_exit_kernel
11314+#endif
11315+.endm
11316+
11317+#ifdef CONFIG_PAX_KERNEXEC
11318+ENTRY(pax_enter_kernel)
11319+#ifdef CONFIG_PARAVIRT
11320+ pushl %eax
11321+ pushl %ecx
11322+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11323+ mov %eax, %esi
11324+#else
11325+ mov %cr0, %esi
11326+#endif
11327+ bts $16, %esi
11328+ jnc 1f
11329+ mov %cs, %esi
11330+ cmp $__KERNEL_CS, %esi
11331+ jz 3f
11332+ ljmp $__KERNEL_CS, $3f
11333+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11334+2:
11335+#ifdef CONFIG_PARAVIRT
11336+ mov %esi, %eax
11337+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11338+#else
11339+ mov %esi, %cr0
11340+#endif
11341+3:
11342+#ifdef CONFIG_PARAVIRT
11343+ popl %ecx
11344+ popl %eax
11345+#endif
11346+ ret
11347+ENDPROC(pax_enter_kernel)
11348+
11349+ENTRY(pax_exit_kernel)
11350+#ifdef CONFIG_PARAVIRT
11351+ pushl %eax
11352+ pushl %ecx
11353+#endif
11354+ mov %cs, %esi
11355+ cmp $__KERNEXEC_KERNEL_CS, %esi
11356+ jnz 2f
11357+#ifdef CONFIG_PARAVIRT
11358+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11359+ mov %eax, %esi
11360+#else
11361+ mov %cr0, %esi
11362+#endif
11363+ btr $16, %esi
11364+ ljmp $__KERNEL_CS, $1f
11365+1:
11366+#ifdef CONFIG_PARAVIRT
11367+ mov %esi, %eax
11368+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11369+#else
11370+ mov %esi, %cr0
11371+#endif
11372+2:
11373+#ifdef CONFIG_PARAVIRT
11374+ popl %ecx
11375+ popl %eax
11376+#endif
11377+ ret
11378+ENDPROC(pax_exit_kernel)
11379+#endif
11380+
11381+.macro pax_erase_kstack
11382+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11383+ call pax_erase_kstack
11384+#endif
11385+.endm
11386+
11387+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11388+/*
11389+ * ebp: thread_info
11390+ * ecx, edx: can be clobbered
11391+ */
11392+ENTRY(pax_erase_kstack)
11393+ pushl %edi
11394+ pushl %eax
11395+
11396+ mov TI_lowest_stack(%ebp), %edi
11397+ mov $-0xBEEF, %eax
11398+ std
11399+
11400+1: mov %edi, %ecx
11401+ and $THREAD_SIZE_asm - 1, %ecx
11402+ shr $2, %ecx
11403+ repne scasl
11404+ jecxz 2f
11405+
11406+ cmp $2*16, %ecx
11407+ jc 2f
11408+
11409+ mov $2*16, %ecx
11410+ repe scasl
11411+ jecxz 2f
11412+ jne 1b
11413+
11414+2: cld
11415+ mov %esp, %ecx
11416+ sub %edi, %ecx
11417+ shr $2, %ecx
11418+ rep stosl
11419+
11420+ mov TI_task_thread_sp0(%ebp), %edi
11421+ sub $128, %edi
11422+ mov %edi, TI_lowest_stack(%ebp)
11423+
11424+ popl %eax
11425+ popl %edi
11426+ ret
11427+ENDPROC(pax_erase_kstack)
11428+#endif
11429+
11430+.macro __SAVE_ALL _DS
11431 cld
11432 PUSH_GS
11433 pushl_cfi %fs
11434@@ -214,7 +347,7 @@
11435 CFI_REL_OFFSET ecx, 0
11436 pushl_cfi %ebx
11437 CFI_REL_OFFSET ebx, 0
11438- movl $(__USER_DS), %edx
11439+ movl $\_DS, %edx
11440 movl %edx, %ds
11441 movl %edx, %es
11442 movl $(__KERNEL_PERCPU), %edx
11443@@ -222,6 +355,15 @@
11444 SET_KERNEL_GS %edx
11445 .endm
11446
11447+.macro SAVE_ALL
11448+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11449+ __SAVE_ALL __KERNEL_DS
11450+ pax_enter_kernel
11451+#else
11452+ __SAVE_ALL __USER_DS
11453+#endif
11454+.endm
11455+
11456 .macro RESTORE_INT_REGS
11457 popl_cfi %ebx
11458 CFI_RESTORE ebx
11459@@ -332,7 +474,15 @@ check_userspace:
11460 movb PT_CS(%esp), %al
11461 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11462 cmpl $USER_RPL, %eax
11463+
11464+#ifdef CONFIG_PAX_KERNEXEC
11465+ jae resume_userspace
11466+
11467+ PAX_EXIT_KERNEL
11468+ jmp resume_kernel
11469+#else
11470 jb resume_kernel # not returning to v8086 or userspace
11471+#endif
11472
11473 ENTRY(resume_userspace)
11474 LOCKDEP_SYS_EXIT
11475@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11476 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11477 # int/exception return?
11478 jne work_pending
11479- jmp restore_all
11480+ jmp restore_all_pax
11481 END(ret_from_exception)
11482
11483 #ifdef CONFIG_PREEMPT
11484@@ -394,23 +544,34 @@ sysenter_past_esp:
11485 /*CFI_REL_OFFSET cs, 0*/
11486 /*
11487 * Push current_thread_info()->sysenter_return to the stack.
11488- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11489- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11490 */
11491- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11492+ pushl_cfi $0
11493 CFI_REL_OFFSET eip, 0
11494
11495 pushl_cfi %eax
11496 SAVE_ALL
11497+ GET_THREAD_INFO(%ebp)
11498+ movl TI_sysenter_return(%ebp),%ebp
11499+ movl %ebp,PT_EIP(%esp)
11500 ENABLE_INTERRUPTS(CLBR_NONE)
11501
11502 /*
11503 * Load the potential sixth argument from user stack.
11504 * Careful about security.
11505 */
11506+ movl PT_OLDESP(%esp),%ebp
11507+
11508+#ifdef CONFIG_PAX_MEMORY_UDEREF
11509+ mov PT_OLDSS(%esp),%ds
11510+1: movl %ds:(%ebp),%ebp
11511+ push %ss
11512+ pop %ds
11513+#else
11514 cmpl $__PAGE_OFFSET-3,%ebp
11515 jae syscall_fault
11516 1: movl (%ebp),%ebp
11517+#endif
11518+
11519 movl %ebp,PT_EBP(%esp)
11520 .section __ex_table,"a"
11521 .align 4
11522@@ -433,12 +594,23 @@ sysenter_do_call:
11523 testl $_TIF_ALLWORK_MASK, %ecx
11524 jne sysexit_audit
11525 sysenter_exit:
11526+
11527+#ifdef CONFIG_PAX_RANDKSTACK
11528+ pushl_cfi %eax
11529+ call pax_randomize_kstack
11530+ popl_cfi %eax
11531+#endif
11532+
11533+ pax_erase_kstack
11534+
11535 /* if something modifies registers it must also disable sysexit */
11536 movl PT_EIP(%esp), %edx
11537 movl PT_OLDESP(%esp), %ecx
11538 xorl %ebp,%ebp
11539 TRACE_IRQS_ON
11540 1: mov PT_FS(%esp), %fs
11541+2: mov PT_DS(%esp), %ds
11542+3: mov PT_ES(%esp), %es
11543 PTGS_TO_GS
11544 ENABLE_INTERRUPTS_SYSEXIT
11545
11546@@ -455,6 +627,9 @@ sysenter_audit:
11547 movl %eax,%edx /* 2nd arg: syscall number */
11548 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11549 call audit_syscall_entry
11550+
11551+ pax_erase_kstack
11552+
11553 pushl_cfi %ebx
11554 movl PT_EAX(%esp),%eax /* reload syscall number */
11555 jmp sysenter_do_call
11556@@ -481,11 +656,17 @@ sysexit_audit:
11557
11558 CFI_ENDPROC
11559 .pushsection .fixup,"ax"
11560-2: movl $0,PT_FS(%esp)
11561+4: movl $0,PT_FS(%esp)
11562+ jmp 1b
11563+5: movl $0,PT_DS(%esp)
11564+ jmp 1b
11565+6: movl $0,PT_ES(%esp)
11566 jmp 1b
11567 .section __ex_table,"a"
11568 .align 4
11569- .long 1b,2b
11570+ .long 1b,4b
11571+ .long 2b,5b
11572+ .long 3b,6b
11573 .popsection
11574 PTGS_TO_GS_EX
11575 ENDPROC(ia32_sysenter_target)
11576@@ -518,6 +699,14 @@ syscall_exit:
11577 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11578 jne syscall_exit_work
11579
11580+restore_all_pax:
11581+
11582+#ifdef CONFIG_PAX_RANDKSTACK
11583+ call pax_randomize_kstack
11584+#endif
11585+
11586+ pax_erase_kstack
11587+
11588 restore_all:
11589 TRACE_IRQS_IRET
11590 restore_all_notrace:
11591@@ -577,14 +766,21 @@ ldt_ss:
11592 * compensating for the offset by changing to the ESPFIX segment with
11593 * a base address that matches for the difference.
11594 */
11595-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11596+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11597 mov %esp, %edx /* load kernel esp */
11598 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11599 mov %dx, %ax /* eax: new kernel esp */
11600 sub %eax, %edx /* offset (low word is 0) */
11601+#ifdef CONFIG_SMP
11602+ movl PER_CPU_VAR(cpu_number), %ebx
11603+ shll $PAGE_SHIFT_asm, %ebx
11604+ addl $cpu_gdt_table, %ebx
11605+#else
11606+ movl $cpu_gdt_table, %ebx
11607+#endif
11608 shr $16, %edx
11609- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11610- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11611+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11612+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11613 pushl_cfi $__ESPFIX_SS
11614 pushl_cfi %eax /* new kernel esp */
11615 /* Disable interrupts, but do not irqtrace this section: we
11616@@ -613,29 +809,23 @@ work_resched:
11617 movl TI_flags(%ebp), %ecx
11618 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11619 # than syscall tracing?
11620- jz restore_all
11621+ jz restore_all_pax
11622 testb $_TIF_NEED_RESCHED, %cl
11623 jnz work_resched
11624
11625 work_notifysig: # deal with pending signals and
11626 # notify-resume requests
11627+ movl %esp, %eax
11628 #ifdef CONFIG_VM86
11629 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11630- movl %esp, %eax
11631- jne work_notifysig_v86 # returning to kernel-space or
11632+ jz 1f # returning to kernel-space or
11633 # vm86-space
11634- xorl %edx, %edx
11635- call do_notify_resume
11636- jmp resume_userspace_sig
11637
11638- ALIGN
11639-work_notifysig_v86:
11640 pushl_cfi %ecx # save ti_flags for do_notify_resume
11641 call save_v86_state # %eax contains pt_regs pointer
11642 popl_cfi %ecx
11643 movl %eax, %esp
11644-#else
11645- movl %esp, %eax
11646+1:
11647 #endif
11648 xorl %edx, %edx
11649 call do_notify_resume
11650@@ -648,6 +838,9 @@ syscall_trace_entry:
11651 movl $-ENOSYS,PT_EAX(%esp)
11652 movl %esp, %eax
11653 call syscall_trace_enter
11654+
11655+ pax_erase_kstack
11656+
11657 /* What it returned is what we'll actually use. */
11658 cmpl $(nr_syscalls), %eax
11659 jnae syscall_call
11660@@ -670,6 +863,10 @@ END(syscall_exit_work)
11661
11662 RING0_INT_FRAME # can't unwind into user space anyway
11663 syscall_fault:
11664+#ifdef CONFIG_PAX_MEMORY_UDEREF
11665+ push %ss
11666+ pop %ds
11667+#endif
11668 GET_THREAD_INFO(%ebp)
11669 movl $-EFAULT,PT_EAX(%esp)
11670 jmp resume_userspace
11671@@ -752,6 +949,36 @@ ptregs_clone:
11672 CFI_ENDPROC
11673 ENDPROC(ptregs_clone)
11674
11675+ ALIGN;
11676+ENTRY(kernel_execve)
11677+ CFI_STARTPROC
11678+ pushl_cfi %ebp
11679+ sub $PT_OLDSS+4,%esp
11680+ pushl_cfi %edi
11681+ pushl_cfi %ecx
11682+ pushl_cfi %eax
11683+ lea 3*4(%esp),%edi
11684+ mov $PT_OLDSS/4+1,%ecx
11685+ xorl %eax,%eax
11686+ rep stosl
11687+ popl_cfi %eax
11688+ popl_cfi %ecx
11689+ popl_cfi %edi
11690+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11691+ pushl_cfi %esp
11692+ call sys_execve
11693+ add $4,%esp
11694+ CFI_ADJUST_CFA_OFFSET -4
11695+ GET_THREAD_INFO(%ebp)
11696+ test %eax,%eax
11697+ jz syscall_exit
11698+ add $PT_OLDSS+4,%esp
11699+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11700+ popl_cfi %ebp
11701+ ret
11702+ CFI_ENDPROC
11703+ENDPROC(kernel_execve)
11704+
11705 .macro FIXUP_ESPFIX_STACK
11706 /*
11707 * Switch back for ESPFIX stack to the normal zerobased stack
11708@@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11709 * normal stack and adjusts ESP with the matching offset.
11710 */
11711 /* fixup the stack */
11712- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11713- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11714+#ifdef CONFIG_SMP
11715+ movl PER_CPU_VAR(cpu_number), %ebx
11716+ shll $PAGE_SHIFT_asm, %ebx
11717+ addl $cpu_gdt_table, %ebx
11718+#else
11719+ movl $cpu_gdt_table, %ebx
11720+#endif
11721+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11722+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11723 shl $16, %eax
11724 addl %esp, %eax /* the adjusted stack pointer */
11725 pushl_cfi $__KERNEL_DS
11726@@ -1213,7 +1447,6 @@ return_to_handler:
11727 jmp *%ecx
11728 #endif
11729
11730-.section .rodata,"a"
11731 #include "syscall_table_32.S"
11732
11733 syscall_table_size=(.-sys_call_table)
11734@@ -1259,9 +1492,12 @@ error_code:
11735 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11736 REG_TO_PTGS %ecx
11737 SET_KERNEL_GS %ecx
11738- movl $(__USER_DS), %ecx
11739+ movl $(__KERNEL_DS), %ecx
11740 movl %ecx, %ds
11741 movl %ecx, %es
11742+
11743+ pax_enter_kernel
11744+
11745 TRACE_IRQS_OFF
11746 movl %esp,%eax # pt_regs pointer
11747 call *%edi
11748@@ -1346,6 +1582,9 @@ nmi_stack_correct:
11749 xorl %edx,%edx # zero error code
11750 movl %esp,%eax # pt_regs pointer
11751 call do_nmi
11752+
11753+ pax_exit_kernel
11754+
11755 jmp restore_all_notrace
11756 CFI_ENDPROC
11757
11758@@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11759 FIXUP_ESPFIX_STACK # %eax == %esp
11760 xorl %edx,%edx # zero error code
11761 call do_nmi
11762+
11763+ pax_exit_kernel
11764+
11765 RESTORE_REGS
11766 lss 12+4(%esp), %esp # back to espfix stack
11767 CFI_ADJUST_CFA_OFFSET -24
11768diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11769--- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11770+++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11771@@ -53,6 +53,7 @@
11772 #include <asm/paravirt.h>
11773 #include <asm/ftrace.h>
11774 #include <asm/percpu.h>
11775+#include <asm/pgtable.h>
11776
11777 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11778 #include <linux/elf-em.h>
11779@@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11780 ENDPROC(native_usergs_sysret64)
11781 #endif /* CONFIG_PARAVIRT */
11782
11783+ .macro ljmpq sel, off
11784+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11785+ .byte 0x48; ljmp *1234f(%rip)
11786+ .pushsection .rodata
11787+ .align 16
11788+ 1234: .quad \off; .word \sel
11789+ .popsection
11790+#else
11791+ pushq $\sel
11792+ pushq $\off
11793+ lretq
11794+#endif
11795+ .endm
11796+
11797+ .macro pax_enter_kernel
11798+#ifdef CONFIG_PAX_KERNEXEC
11799+ call pax_enter_kernel
11800+#endif
11801+ .endm
11802+
11803+ .macro pax_exit_kernel
11804+#ifdef CONFIG_PAX_KERNEXEC
11805+ call pax_exit_kernel
11806+#endif
11807+ .endm
11808+
11809+#ifdef CONFIG_PAX_KERNEXEC
11810+ENTRY(pax_enter_kernel)
11811+ pushq %rdi
11812+
11813+#ifdef CONFIG_PARAVIRT
11814+ PV_SAVE_REGS(CLBR_RDI)
11815+#endif
11816+
11817+ GET_CR0_INTO_RDI
11818+ bts $16,%rdi
11819+ jnc 1f
11820+ mov %cs,%edi
11821+ cmp $__KERNEL_CS,%edi
11822+ jz 3f
11823+ ljmpq __KERNEL_CS,3f
11824+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11825+2: SET_RDI_INTO_CR0
11826+3:
11827+
11828+#ifdef CONFIG_PARAVIRT
11829+ PV_RESTORE_REGS(CLBR_RDI)
11830+#endif
11831+
11832+ popq %rdi
11833+ retq
11834+ENDPROC(pax_enter_kernel)
11835+
11836+ENTRY(pax_exit_kernel)
11837+ pushq %rdi
11838+
11839+#ifdef CONFIG_PARAVIRT
11840+ PV_SAVE_REGS(CLBR_RDI)
11841+#endif
11842+
11843+ mov %cs,%rdi
11844+ cmp $__KERNEXEC_KERNEL_CS,%edi
11845+ jnz 2f
11846+ GET_CR0_INTO_RDI
11847+ btr $16,%rdi
11848+ ljmpq __KERNEL_CS,1f
11849+1: SET_RDI_INTO_CR0
11850+2:
11851+
11852+#ifdef CONFIG_PARAVIRT
11853+ PV_RESTORE_REGS(CLBR_RDI);
11854+#endif
11855+
11856+ popq %rdi
11857+ retq
11858+ENDPROC(pax_exit_kernel)
11859+#endif
11860+
11861+ .macro pax_enter_kernel_user
11862+#ifdef CONFIG_PAX_MEMORY_UDEREF
11863+ call pax_enter_kernel_user
11864+#endif
11865+ .endm
11866+
11867+ .macro pax_exit_kernel_user
11868+#ifdef CONFIG_PAX_MEMORY_UDEREF
11869+ call pax_exit_kernel_user
11870+#endif
11871+#ifdef CONFIG_PAX_RANDKSTACK
11872+ push %rax
11873+ call pax_randomize_kstack
11874+ pop %rax
11875+#endif
11876+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11877+ call pax_erase_kstack
11878+#endif
11879+ .endm
11880+
11881+#ifdef CONFIG_PAX_MEMORY_UDEREF
11882+ENTRY(pax_enter_kernel_user)
11883+ pushq %rdi
11884+ pushq %rbx
11885+
11886+#ifdef CONFIG_PARAVIRT
11887+ PV_SAVE_REGS(CLBR_RDI)
11888+#endif
11889+
11890+ GET_CR3_INTO_RDI
11891+ mov %rdi,%rbx
11892+ add $__START_KERNEL_map,%rbx
11893+ sub phys_base(%rip),%rbx
11894+
11895+#ifdef CONFIG_PARAVIRT
11896+ pushq %rdi
11897+ cmpl $0, pv_info+PARAVIRT_enabled
11898+ jz 1f
11899+ i = 0
11900+ .rept USER_PGD_PTRS
11901+ mov i*8(%rbx),%rsi
11902+ mov $0,%sil
11903+ lea i*8(%rbx),%rdi
11904+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11905+ i = i + 1
11906+ .endr
11907+ jmp 2f
11908+1:
11909+#endif
11910+
11911+ i = 0
11912+ .rept USER_PGD_PTRS
11913+ movb $0,i*8(%rbx)
11914+ i = i + 1
11915+ .endr
11916+
11917+#ifdef CONFIG_PARAVIRT
11918+2: popq %rdi
11919+#endif
11920+ SET_RDI_INTO_CR3
11921+
11922+#ifdef CONFIG_PAX_KERNEXEC
11923+ GET_CR0_INTO_RDI
11924+ bts $16,%rdi
11925+ SET_RDI_INTO_CR0
11926+#endif
11927+
11928+#ifdef CONFIG_PARAVIRT
11929+ PV_RESTORE_REGS(CLBR_RDI)
11930+#endif
11931+
11932+ popq %rbx
11933+ popq %rdi
11934+ retq
11935+ENDPROC(pax_enter_kernel_user)
11936+
11937+ENTRY(pax_exit_kernel_user)
11938+ push %rdi
11939+
11940+#ifdef CONFIG_PARAVIRT
11941+ pushq %rbx
11942+ PV_SAVE_REGS(CLBR_RDI)
11943+#endif
11944+
11945+#ifdef CONFIG_PAX_KERNEXEC
11946+ GET_CR0_INTO_RDI
11947+ btr $16,%rdi
11948+ SET_RDI_INTO_CR0
11949+#endif
11950+
11951+ GET_CR3_INTO_RDI
11952+ add $__START_KERNEL_map,%rdi
11953+ sub phys_base(%rip),%rdi
11954+
11955+#ifdef CONFIG_PARAVIRT
11956+ cmpl $0, pv_info+PARAVIRT_enabled
11957+ jz 1f
11958+ mov %rdi,%rbx
11959+ i = 0
11960+ .rept USER_PGD_PTRS
11961+ mov i*8(%rbx),%rsi
11962+ mov $0x67,%sil
11963+ lea i*8(%rbx),%rdi
11964+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11965+ i = i + 1
11966+ .endr
11967+ jmp 2f
11968+1:
11969+#endif
11970+
11971+ i = 0
11972+ .rept USER_PGD_PTRS
11973+ movb $0x67,i*8(%rdi)
11974+ i = i + 1
11975+ .endr
11976+
11977+#ifdef CONFIG_PARAVIRT
11978+2: PV_RESTORE_REGS(CLBR_RDI)
11979+ popq %rbx
11980+#endif
11981+
11982+ popq %rdi
11983+ retq
11984+ENDPROC(pax_exit_kernel_user)
11985+#endif
11986+
11987+ .macro pax_erase_kstack
11988+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11989+ call pax_erase_kstack
11990+#endif
11991+ .endm
11992+
11993+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11994+/*
11995+ * r10: thread_info
11996+ * rcx, rdx: can be clobbered
11997+ */
11998+ENTRY(pax_erase_kstack)
11999+ pushq %rdi
12000+ pushq %rax
12001+
12002+ GET_THREAD_INFO(%r10)
12003+ mov TI_lowest_stack(%r10), %rdi
12004+ mov $-0xBEEF, %rax
12005+ std
12006+
12007+1: mov %edi, %ecx
12008+ and $THREAD_SIZE_asm - 1, %ecx
12009+ shr $3, %ecx
12010+ repne scasq
12011+ jecxz 2f
12012+
12013+ cmp $2*8, %ecx
12014+ jc 2f
12015+
12016+ mov $2*8, %ecx
12017+ repe scasq
12018+ jecxz 2f
12019+ jne 1b
12020+
12021+2: cld
12022+ mov %esp, %ecx
12023+ sub %edi, %ecx
12024+ shr $3, %ecx
12025+ rep stosq
12026+
12027+ mov TI_task_thread_sp0(%r10), %rdi
12028+ sub $256, %rdi
12029+ mov %rdi, TI_lowest_stack(%r10)
12030+
12031+ popq %rax
12032+ popq %rdi
12033+ ret
12034+ENDPROC(pax_erase_kstack)
12035+#endif
12036
12037 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12038 #ifdef CONFIG_TRACE_IRQFLAGS
12039@@ -318,7 +572,7 @@ ENTRY(save_args)
12040 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12041 movq_cfi rbp, 8 /* push %rbp */
12042 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12043- testl $3, CS(%rdi)
12044+ testb $3, CS(%rdi)
12045 je 1f
12046 SWAPGS
12047 /*
12048@@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12049
12050 RESTORE_REST
12051
12052- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12053+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12054 je int_ret_from_sys_call
12055
12056 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12057@@ -455,7 +709,7 @@ END(ret_from_fork)
12058 ENTRY(system_call)
12059 CFI_STARTPROC simple
12060 CFI_SIGNAL_FRAME
12061- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12062+ CFI_DEF_CFA rsp,0
12063 CFI_REGISTER rip,rcx
12064 /*CFI_REGISTER rflags,r11*/
12065 SWAPGS_UNSAFE_STACK
12066@@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12067
12068 movq %rsp,PER_CPU_VAR(old_rsp)
12069 movq PER_CPU_VAR(kernel_stack),%rsp
12070+ pax_enter_kernel_user
12071 /*
12072 * No need to follow this irqs off/on section - it's straight
12073 * and short:
12074 */
12075 ENABLE_INTERRUPTS(CLBR_NONE)
12076- SAVE_ARGS 8,1
12077+ SAVE_ARGS 8*6,1
12078 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12079 movq %rcx,RIP-ARGOFFSET(%rsp)
12080 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12081@@ -502,6 +757,7 @@ sysret_check:
12082 andl %edi,%edx
12083 jnz sysret_careful
12084 CFI_REMEMBER_STATE
12085+ pax_exit_kernel_user
12086 /*
12087 * sysretq will re-enable interrupts:
12088 */
12089@@ -560,6 +816,9 @@ auditsys:
12090 movq %rax,%rsi /* 2nd arg: syscall number */
12091 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12092 call audit_syscall_entry
12093+
12094+ pax_erase_kstack
12095+
12096 LOAD_ARGS 0 /* reload call-clobbered registers */
12097 jmp system_call_fastpath
12098
12099@@ -590,6 +849,9 @@ tracesys:
12100 FIXUP_TOP_OF_STACK %rdi
12101 movq %rsp,%rdi
12102 call syscall_trace_enter
12103+
12104+ pax_erase_kstack
12105+
12106 /*
12107 * Reload arg registers from stack in case ptrace changed them.
12108 * We don't reload %rax because syscall_trace_enter() returned
12109@@ -611,7 +873,7 @@ tracesys:
12110 GLOBAL(int_ret_from_sys_call)
12111 DISABLE_INTERRUPTS(CLBR_NONE)
12112 TRACE_IRQS_OFF
12113- testl $3,CS-ARGOFFSET(%rsp)
12114+ testb $3,CS-ARGOFFSET(%rsp)
12115 je retint_restore_args
12116 movl $_TIF_ALLWORK_MASK,%edi
12117 /* edi: mask to check */
12118@@ -793,6 +1055,16 @@ END(interrupt)
12119 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12120 call save_args
12121 PARTIAL_FRAME 0
12122+#ifdef CONFIG_PAX_MEMORY_UDEREF
12123+ testb $3, CS(%rdi)
12124+ jnz 1f
12125+ pax_enter_kernel
12126+ jmp 2f
12127+1: pax_enter_kernel_user
12128+2:
12129+#else
12130+ pax_enter_kernel
12131+#endif
12132 call \func
12133 .endm
12134
12135@@ -825,7 +1097,7 @@ ret_from_intr:
12136 CFI_ADJUST_CFA_OFFSET -8
12137 exit_intr:
12138 GET_THREAD_INFO(%rcx)
12139- testl $3,CS-ARGOFFSET(%rsp)
12140+ testb $3,CS-ARGOFFSET(%rsp)
12141 je retint_kernel
12142
12143 /* Interrupt came from user space */
12144@@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12145 * The iretq could re-enable interrupts:
12146 */
12147 DISABLE_INTERRUPTS(CLBR_ANY)
12148+ pax_exit_kernel_user
12149 TRACE_IRQS_IRETQ
12150 SWAPGS
12151 jmp restore_args
12152
12153 retint_restore_args: /* return to kernel space */
12154 DISABLE_INTERRUPTS(CLBR_ANY)
12155+ pax_exit_kernel
12156 /*
12157 * The iretq could re-enable interrupts:
12158 */
12159@@ -1027,6 +1301,16 @@ ENTRY(\sym)
12160 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12161 call error_entry
12162 DEFAULT_FRAME 0
12163+#ifdef CONFIG_PAX_MEMORY_UDEREF
12164+ testb $3, CS(%rsp)
12165+ jnz 1f
12166+ pax_enter_kernel
12167+ jmp 2f
12168+1: pax_enter_kernel_user
12169+2:
12170+#else
12171+ pax_enter_kernel
12172+#endif
12173 movq %rsp,%rdi /* pt_regs pointer */
12174 xorl %esi,%esi /* no error code */
12175 call \do_sym
12176@@ -1044,6 +1328,16 @@ ENTRY(\sym)
12177 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12178 call save_paranoid
12179 TRACE_IRQS_OFF
12180+#ifdef CONFIG_PAX_MEMORY_UDEREF
12181+ testb $3, CS(%rsp)
12182+ jnz 1f
12183+ pax_enter_kernel
12184+ jmp 2f
12185+1: pax_enter_kernel_user
12186+2:
12187+#else
12188+ pax_enter_kernel
12189+#endif
12190 movq %rsp,%rdi /* pt_regs pointer */
12191 xorl %esi,%esi /* no error code */
12192 call \do_sym
12193@@ -1052,7 +1346,7 @@ ENTRY(\sym)
12194 END(\sym)
12195 .endm
12196
12197-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12198+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12199 .macro paranoidzeroentry_ist sym do_sym ist
12200 ENTRY(\sym)
12201 INTR_FRAME
12202@@ -1062,8 +1356,24 @@ ENTRY(\sym)
12203 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12204 call save_paranoid
12205 TRACE_IRQS_OFF
12206+#ifdef CONFIG_PAX_MEMORY_UDEREF
12207+ testb $3, CS(%rsp)
12208+ jnz 1f
12209+ pax_enter_kernel
12210+ jmp 2f
12211+1: pax_enter_kernel_user
12212+2:
12213+#else
12214+ pax_enter_kernel
12215+#endif
12216 movq %rsp,%rdi /* pt_regs pointer */
12217 xorl %esi,%esi /* no error code */
12218+#ifdef CONFIG_SMP
12219+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12220+ lea init_tss(%r12), %r12
12221+#else
12222+ lea init_tss(%rip), %r12
12223+#endif
12224 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12225 call \do_sym
12226 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12227@@ -1080,6 +1390,16 @@ ENTRY(\sym)
12228 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12229 call error_entry
12230 DEFAULT_FRAME 0
12231+#ifdef CONFIG_PAX_MEMORY_UDEREF
12232+ testb $3, CS(%rsp)
12233+ jnz 1f
12234+ pax_enter_kernel
12235+ jmp 2f
12236+1: pax_enter_kernel_user
12237+2:
12238+#else
12239+ pax_enter_kernel
12240+#endif
12241 movq %rsp,%rdi /* pt_regs pointer */
12242 movq ORIG_RAX(%rsp),%rsi /* get error code */
12243 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12244@@ -1099,6 +1419,16 @@ ENTRY(\sym)
12245 call save_paranoid
12246 DEFAULT_FRAME 0
12247 TRACE_IRQS_OFF
12248+#ifdef CONFIG_PAX_MEMORY_UDEREF
12249+ testb $3, CS(%rsp)
12250+ jnz 1f
12251+ pax_enter_kernel
12252+ jmp 2f
12253+1: pax_enter_kernel_user
12254+2:
12255+#else
12256+ pax_enter_kernel
12257+#endif
12258 movq %rsp,%rdi /* pt_regs pointer */
12259 movq ORIG_RAX(%rsp),%rsi /* get error code */
12260 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12261@@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12262 TRACE_IRQS_OFF
12263 testl %ebx,%ebx /* swapgs needed? */
12264 jnz paranoid_restore
12265- testl $3,CS(%rsp)
12266+ testb $3,CS(%rsp)
12267 jnz paranoid_userspace
12268+#ifdef CONFIG_PAX_MEMORY_UDEREF
12269+ pax_exit_kernel
12270+ TRACE_IRQS_IRETQ 0
12271+ SWAPGS_UNSAFE_STACK
12272+ RESTORE_ALL 8
12273+ jmp irq_return
12274+#endif
12275 paranoid_swapgs:
12276+#ifdef CONFIG_PAX_MEMORY_UDEREF
12277+ pax_exit_kernel_user
12278+#else
12279+ pax_exit_kernel
12280+#endif
12281 TRACE_IRQS_IRETQ 0
12282 SWAPGS_UNSAFE_STACK
12283 RESTORE_ALL 8
12284 jmp irq_return
12285 paranoid_restore:
12286+ pax_exit_kernel
12287 TRACE_IRQS_IRETQ 0
12288 RESTORE_ALL 8
12289 jmp irq_return
12290@@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12291 movq_cfi r14, R14+8
12292 movq_cfi r15, R15+8
12293 xorl %ebx,%ebx
12294- testl $3,CS+8(%rsp)
12295+ testb $3,CS+8(%rsp)
12296 je error_kernelspace
12297 error_swapgs:
12298 SWAPGS
12299@@ -1490,6 +1833,16 @@ ENTRY(nmi)
12300 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12301 call save_paranoid
12302 DEFAULT_FRAME 0
12303+#ifdef CONFIG_PAX_MEMORY_UDEREF
12304+ testb $3, CS(%rsp)
12305+ jnz 1f
12306+ pax_enter_kernel
12307+ jmp 2f
12308+1: pax_enter_kernel_user
12309+2:
12310+#else
12311+ pax_enter_kernel
12312+#endif
12313 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12314 movq %rsp,%rdi
12315 movq $-1,%rsi
12316@@ -1500,11 +1853,25 @@ ENTRY(nmi)
12317 DISABLE_INTERRUPTS(CLBR_NONE)
12318 testl %ebx,%ebx /* swapgs needed? */
12319 jnz nmi_restore
12320- testl $3,CS(%rsp)
12321+ testb $3,CS(%rsp)
12322 jnz nmi_userspace
12323+#ifdef CONFIG_PAX_MEMORY_UDEREF
12324+ pax_exit_kernel
12325+ SWAPGS_UNSAFE_STACK
12326+ RESTORE_ALL 8
12327+ jmp irq_return
12328+#endif
12329 nmi_swapgs:
12330+#ifdef CONFIG_PAX_MEMORY_UDEREF
12331+ pax_exit_kernel_user
12332+#else
12333+ pax_exit_kernel
12334+#endif
12335 SWAPGS_UNSAFE_STACK
12336+ RESTORE_ALL 8
12337+ jmp irq_return
12338 nmi_restore:
12339+ pax_exit_kernel
12340 RESTORE_ALL 8
12341 jmp irq_return
12342 nmi_userspace:
12343diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12344--- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12345+++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12346@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12347 static void *mod_code_newcode; /* holds the text to write to the IP */
12348
12349 static unsigned nmi_wait_count;
12350-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12351+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12352
12353 int ftrace_arch_read_dyn_info(char *buf, int size)
12354 {
12355@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12356
12357 r = snprintf(buf, size, "%u %u",
12358 nmi_wait_count,
12359- atomic_read(&nmi_update_count));
12360+ atomic_read_unchecked(&nmi_update_count));
12361 return r;
12362 }
12363
12364@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12365
12366 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12367 smp_rmb();
12368+ pax_open_kernel();
12369 ftrace_mod_code();
12370- atomic_inc(&nmi_update_count);
12371+ pax_close_kernel();
12372+ atomic_inc_unchecked(&nmi_update_count);
12373 }
12374 /* Must have previous changes seen before executions */
12375 smp_mb();
12376@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12377 {
12378 unsigned char replaced[MCOUNT_INSN_SIZE];
12379
12380+ ip = ktla_ktva(ip);
12381+
12382 /*
12383 * Note: Due to modules and __init, code can
12384 * disappear and change, we need to protect against faulting
12385@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12386 unsigned char old[MCOUNT_INSN_SIZE], *new;
12387 int ret;
12388
12389- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12390+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12391 new = ftrace_call_replace(ip, (unsigned long)func);
12392 ret = ftrace_modify_code(ip, old, new);
12393
12394@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12395 {
12396 unsigned char code[MCOUNT_INSN_SIZE];
12397
12398+ ip = ktla_ktva(ip);
12399+
12400 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12401 return -EFAULT;
12402
12403diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12404--- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12405+++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12406@@ -19,6 +19,7 @@
12407 #include <asm/io_apic.h>
12408 #include <asm/bios_ebda.h>
12409 #include <asm/tlbflush.h>
12410+#include <asm/boot.h>
12411
12412 static void __init i386_default_early_setup(void)
12413 {
12414@@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12415 {
12416 memblock_init();
12417
12418- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12419+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12420
12421 #ifdef CONFIG_BLK_DEV_INITRD
12422 /* Reserve INITRD */
12423diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12424--- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12425+++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12426@@ -25,6 +25,12 @@
12427 /* Physical address */
12428 #define pa(X) ((X) - __PAGE_OFFSET)
12429
12430+#ifdef CONFIG_PAX_KERNEXEC
12431+#define ta(X) (X)
12432+#else
12433+#define ta(X) ((X) - __PAGE_OFFSET)
12434+#endif
12435+
12436 /*
12437 * References to members of the new_cpu_data structure.
12438 */
12439@@ -54,11 +60,7 @@
12440 * and small than max_low_pfn, otherwise will waste some page table entries
12441 */
12442
12443-#if PTRS_PER_PMD > 1
12444-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12445-#else
12446-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12447-#endif
12448+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12449
12450 /* Number of possible pages in the lowmem region */
12451 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12452@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12453 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12454
12455 /*
12456+ * Real beginning of normal "text" segment
12457+ */
12458+ENTRY(stext)
12459+ENTRY(_stext)
12460+
12461+/*
12462 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12463 * %esi points to the real-mode code as a 32-bit pointer.
12464 * CS and DS must be 4 GB flat segments, but we don't depend on
12465@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12466 * can.
12467 */
12468 __HEAD
12469+
12470+#ifdef CONFIG_PAX_KERNEXEC
12471+ jmp startup_32
12472+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12473+.fill PAGE_SIZE-5,1,0xcc
12474+#endif
12475+
12476 ENTRY(startup_32)
12477 movl pa(stack_start),%ecx
12478
12479@@ -105,6 +120,57 @@ ENTRY(startup_32)
12480 2:
12481 leal -__PAGE_OFFSET(%ecx),%esp
12482
12483+#ifdef CONFIG_SMP
12484+ movl $pa(cpu_gdt_table),%edi
12485+ movl $__per_cpu_load,%eax
12486+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12487+ rorl $16,%eax
12488+ movb %al,__KERNEL_PERCPU + 4(%edi)
12489+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12490+ movl $__per_cpu_end - 1,%eax
12491+ subl $__per_cpu_start,%eax
12492+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12493+#endif
12494+
12495+#ifdef CONFIG_PAX_MEMORY_UDEREF
12496+ movl $NR_CPUS,%ecx
12497+ movl $pa(cpu_gdt_table),%edi
12498+1:
12499+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12500+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12501+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12502+ addl $PAGE_SIZE_asm,%edi
12503+ loop 1b
12504+#endif
12505+
12506+#ifdef CONFIG_PAX_KERNEXEC
12507+ movl $pa(boot_gdt),%edi
12508+ movl $__LOAD_PHYSICAL_ADDR,%eax
12509+ movw %ax,__BOOT_CS + 2(%edi)
12510+ rorl $16,%eax
12511+ movb %al,__BOOT_CS + 4(%edi)
12512+ movb %ah,__BOOT_CS + 7(%edi)
12513+ rorl $16,%eax
12514+
12515+ ljmp $(__BOOT_CS),$1f
12516+1:
12517+
12518+ movl $NR_CPUS,%ecx
12519+ movl $pa(cpu_gdt_table),%edi
12520+ addl $__PAGE_OFFSET,%eax
12521+1:
12522+ movw %ax,__KERNEL_CS + 2(%edi)
12523+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12524+ rorl $16,%eax
12525+ movb %al,__KERNEL_CS + 4(%edi)
12526+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12527+ movb %ah,__KERNEL_CS + 7(%edi)
12528+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12529+ rorl $16,%eax
12530+ addl $PAGE_SIZE_asm,%edi
12531+ loop 1b
12532+#endif
12533+
12534 /*
12535 * Clear BSS first so that there are no surprises...
12536 */
12537@@ -195,8 +261,11 @@ ENTRY(startup_32)
12538 movl %eax, pa(max_pfn_mapped)
12539
12540 /* Do early initialization of the fixmap area */
12541- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12542- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12543+#ifdef CONFIG_COMPAT_VDSO
12544+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12545+#else
12546+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12547+#endif
12548 #else /* Not PAE */
12549
12550 page_pde_offset = (__PAGE_OFFSET >> 20);
12551@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12552 movl %eax, pa(max_pfn_mapped)
12553
12554 /* Do early initialization of the fixmap area */
12555- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12556- movl %eax,pa(initial_page_table+0xffc)
12557+#ifdef CONFIG_COMPAT_VDSO
12558+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12559+#else
12560+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12561+#endif
12562 #endif
12563
12564 #ifdef CONFIG_PARAVIRT
12565@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12566 cmpl $num_subarch_entries, %eax
12567 jae bad_subarch
12568
12569- movl pa(subarch_entries)(,%eax,4), %eax
12570- subl $__PAGE_OFFSET, %eax
12571- jmp *%eax
12572+ jmp *pa(subarch_entries)(,%eax,4)
12573
12574 bad_subarch:
12575 WEAK(lguest_entry)
12576@@ -255,10 +325,10 @@ WEAK(xen_entry)
12577 __INITDATA
12578
12579 subarch_entries:
12580- .long default_entry /* normal x86/PC */
12581- .long lguest_entry /* lguest hypervisor */
12582- .long xen_entry /* Xen hypervisor */
12583- .long default_entry /* Moorestown MID */
12584+ .long ta(default_entry) /* normal x86/PC */
12585+ .long ta(lguest_entry) /* lguest hypervisor */
12586+ .long ta(xen_entry) /* Xen hypervisor */
12587+ .long ta(default_entry) /* Moorestown MID */
12588 num_subarch_entries = (. - subarch_entries) / 4
12589 .previous
12590 #else
12591@@ -312,6 +382,7 @@ default_entry:
12592 orl %edx,%eax
12593 movl %eax,%cr4
12594
12595+#ifdef CONFIG_X86_PAE
12596 testb $X86_CR4_PAE, %al # check if PAE is enabled
12597 jz 6f
12598
12599@@ -340,6 +411,9 @@ default_entry:
12600 /* Make changes effective */
12601 wrmsr
12602
12603+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12604+#endif
12605+
12606 6:
12607
12608 /*
12609@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12610 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12611 movl %eax,%ss # after changing gdt.
12612
12613- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12614+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12615 movl %eax,%ds
12616 movl %eax,%es
12617
12618@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12619 */
12620 cmpb $0,ready
12621 jne 1f
12622- movl $gdt_page,%eax
12623+ movl $cpu_gdt_table,%eax
12624 movl $stack_canary,%ecx
12625+#ifdef CONFIG_SMP
12626+ addl $__per_cpu_load,%ecx
12627+#endif
12628 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12629 shrl $16, %ecx
12630 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12631 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12632 1:
12633-#endif
12634 movl $(__KERNEL_STACK_CANARY),%eax
12635+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12636+ movl $(__USER_DS),%eax
12637+#else
12638+ xorl %eax,%eax
12639+#endif
12640 movl %eax,%gs
12641
12642 xorl %eax,%eax # Clear LDT
12643@@ -558,22 +639,22 @@ early_page_fault:
12644 jmp early_fault
12645
12646 early_fault:
12647- cld
12648 #ifdef CONFIG_PRINTK
12649+ cmpl $1,%ss:early_recursion_flag
12650+ je hlt_loop
12651+ incl %ss:early_recursion_flag
12652+ cld
12653 pusha
12654 movl $(__KERNEL_DS),%eax
12655 movl %eax,%ds
12656 movl %eax,%es
12657- cmpl $2,early_recursion_flag
12658- je hlt_loop
12659- incl early_recursion_flag
12660 movl %cr2,%eax
12661 pushl %eax
12662 pushl %edx /* trapno */
12663 pushl $fault_msg
12664 call printk
12665+; call dump_stack
12666 #endif
12667- call dump_stack
12668 hlt_loop:
12669 hlt
12670 jmp hlt_loop
12671@@ -581,8 +662,11 @@ hlt_loop:
12672 /* This is the default interrupt "handler" :-) */
12673 ALIGN
12674 ignore_int:
12675- cld
12676 #ifdef CONFIG_PRINTK
12677+ cmpl $2,%ss:early_recursion_flag
12678+ je hlt_loop
12679+ incl %ss:early_recursion_flag
12680+ cld
12681 pushl %eax
12682 pushl %ecx
12683 pushl %edx
12684@@ -591,9 +675,6 @@ ignore_int:
12685 movl $(__KERNEL_DS),%eax
12686 movl %eax,%ds
12687 movl %eax,%es
12688- cmpl $2,early_recursion_flag
12689- je hlt_loop
12690- incl early_recursion_flag
12691 pushl 16(%esp)
12692 pushl 24(%esp)
12693 pushl 32(%esp)
12694@@ -622,29 +703,43 @@ ENTRY(initial_code)
12695 /*
12696 * BSS section
12697 */
12698-__PAGE_ALIGNED_BSS
12699- .align PAGE_SIZE
12700 #ifdef CONFIG_X86_PAE
12701+.section .initial_pg_pmd,"a",@progbits
12702 initial_pg_pmd:
12703 .fill 1024*KPMDS,4,0
12704 #else
12705+.section .initial_page_table,"a",@progbits
12706 ENTRY(initial_page_table)
12707 .fill 1024,4,0
12708 #endif
12709+.section .initial_pg_fixmap,"a",@progbits
12710 initial_pg_fixmap:
12711 .fill 1024,4,0
12712+.section .empty_zero_page,"a",@progbits
12713 ENTRY(empty_zero_page)
12714 .fill 4096,1,0
12715+.section .swapper_pg_dir,"a",@progbits
12716 ENTRY(swapper_pg_dir)
12717+#ifdef CONFIG_X86_PAE
12718+ .fill 4,8,0
12719+#else
12720 .fill 1024,4,0
12721+#endif
12722+
12723+/*
12724+ * The IDT has to be page-aligned to simplify the Pentium
12725+ * F0 0F bug workaround.. We have a special link segment
12726+ * for this.
12727+ */
12728+.section .idt,"a",@progbits
12729+ENTRY(idt_table)
12730+ .fill 256,8,0
12731
12732 /*
12733 * This starts the data section.
12734 */
12735 #ifdef CONFIG_X86_PAE
12736-__PAGE_ALIGNED_DATA
12737- /* Page-aligned for the benefit of paravirt? */
12738- .align PAGE_SIZE
12739+.section .initial_page_table,"a",@progbits
12740 ENTRY(initial_page_table)
12741 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12742 # if KPMDS == 3
12743@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12744 # error "Kernel PMDs should be 1, 2 or 3"
12745 # endif
12746 .align PAGE_SIZE /* needs to be page-sized too */
12747+
12748+#ifdef CONFIG_PAX_PER_CPU_PGD
12749+ENTRY(cpu_pgd)
12750+ .rept NR_CPUS
12751+ .fill 4,8,0
12752+ .endr
12753+#endif
12754+
12755 #endif
12756
12757 .data
12758 .balign 4
12759 ENTRY(stack_start)
12760- .long init_thread_union+THREAD_SIZE
12761+ .long init_thread_union+THREAD_SIZE-8
12762+
12763+ready: .byte 0
12764
12765+.section .rodata,"a",@progbits
12766 early_recursion_flag:
12767 .long 0
12768
12769-ready: .byte 0
12770-
12771 int_msg:
12772 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12773
12774@@ -707,7 +811,7 @@ fault_msg:
12775 .word 0 # 32 bit align gdt_desc.address
12776 boot_gdt_descr:
12777 .word __BOOT_DS+7
12778- .long boot_gdt - __PAGE_OFFSET
12779+ .long pa(boot_gdt)
12780
12781 .word 0 # 32-bit align idt_desc.address
12782 idt_descr:
12783@@ -718,7 +822,7 @@ idt_descr:
12784 .word 0 # 32 bit align gdt_desc.address
12785 ENTRY(early_gdt_descr)
12786 .word GDT_ENTRIES*8-1
12787- .long gdt_page /* Overwritten for secondary CPUs */
12788+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12789
12790 /*
12791 * The boot_gdt must mirror the equivalent in setup.S and is
12792@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12793 .align L1_CACHE_BYTES
12794 ENTRY(boot_gdt)
12795 .fill GDT_ENTRY_BOOT_CS,8,0
12796- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12797- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12798+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12799+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12800+
12801+ .align PAGE_SIZE_asm
12802+ENTRY(cpu_gdt_table)
12803+ .rept NR_CPUS
12804+ .quad 0x0000000000000000 /* NULL descriptor */
12805+ .quad 0x0000000000000000 /* 0x0b reserved */
12806+ .quad 0x0000000000000000 /* 0x13 reserved */
12807+ .quad 0x0000000000000000 /* 0x1b reserved */
12808+
12809+#ifdef CONFIG_PAX_KERNEXEC
12810+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12811+#else
12812+ .quad 0x0000000000000000 /* 0x20 unused */
12813+#endif
12814+
12815+ .quad 0x0000000000000000 /* 0x28 unused */
12816+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12817+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12818+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12819+ .quad 0x0000000000000000 /* 0x4b reserved */
12820+ .quad 0x0000000000000000 /* 0x53 reserved */
12821+ .quad 0x0000000000000000 /* 0x5b reserved */
12822+
12823+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12824+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12825+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12826+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12827+
12828+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12829+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12830+
12831+ /*
12832+ * Segments used for calling PnP BIOS have byte granularity.
12833+ * The code segments and data segments have fixed 64k limits,
12834+ * the transfer segment sizes are set at run time.
12835+ */
12836+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12837+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12838+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12839+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12840+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12841+
12842+ /*
12843+ * The APM segments have byte granularity and their bases
12844+ * are set at run time. All have 64k limits.
12845+ */
12846+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12847+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12848+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12849+
12850+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12851+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12852+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12853+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12854+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12855+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12856+
12857+ /* Be sure this is zeroed to avoid false validations in Xen */
12858+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12859+ .endr
12860diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12861--- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12862+++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12863@@ -19,6 +19,7 @@
12864 #include <asm/cache.h>
12865 #include <asm/processor-flags.h>
12866 #include <asm/percpu.h>
12867+#include <asm/cpufeature.h>
12868
12869 #ifdef CONFIG_PARAVIRT
12870 #include <asm/asm-offsets.h>
12871@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12872 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12873 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12874 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12875+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12876+L3_VMALLOC_START = pud_index(VMALLOC_START)
12877+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12878+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12879
12880 .text
12881 __HEAD
12882@@ -85,35 +90,22 @@ startup_64:
12883 */
12884 addq %rbp, init_level4_pgt + 0(%rip)
12885 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12886+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12887+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12888 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12889
12890 addq %rbp, level3_ident_pgt + 0(%rip)
12891+#ifndef CONFIG_XEN
12892+ addq %rbp, level3_ident_pgt + 8(%rip)
12893+#endif
12894
12895- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12896- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12897+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12898
12899- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12900+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12901+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12902
12903- /* Add an Identity mapping if I am above 1G */
12904- leaq _text(%rip), %rdi
12905- andq $PMD_PAGE_MASK, %rdi
12906-
12907- movq %rdi, %rax
12908- shrq $PUD_SHIFT, %rax
12909- andq $(PTRS_PER_PUD - 1), %rax
12910- jz ident_complete
12911-
12912- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12913- leaq level3_ident_pgt(%rip), %rbx
12914- movq %rdx, 0(%rbx, %rax, 8)
12915-
12916- movq %rdi, %rax
12917- shrq $PMD_SHIFT, %rax
12918- andq $(PTRS_PER_PMD - 1), %rax
12919- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12920- leaq level2_spare_pgt(%rip), %rbx
12921- movq %rdx, 0(%rbx, %rax, 8)
12922-ident_complete:
12923+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12924+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12925
12926 /*
12927 * Fixup the kernel text+data virtual addresses. Note that
12928@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12929 * after the boot processor executes this code.
12930 */
12931
12932- /* Enable PAE mode and PGE */
12933- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12934+ /* Enable PAE mode and PSE/PGE */
12935+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12936 movq %rax, %cr4
12937
12938 /* Setup early boot stage 4 level pagetables. */
12939@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12940 movl $MSR_EFER, %ecx
12941 rdmsr
12942 btsl $_EFER_SCE, %eax /* Enable System Call */
12943- btl $20,%edi /* No Execute supported? */
12944+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12945 jnc 1f
12946 btsl $_EFER_NX, %eax
12947+ leaq init_level4_pgt(%rip), %rdi
12948+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12949+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12950+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12951+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12952 1: wrmsr /* Make changes effective */
12953
12954 /* Setup cr0 */
12955@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12956 bad_address:
12957 jmp bad_address
12958
12959- .section ".init.text","ax"
12960+ __INIT
12961 #ifdef CONFIG_EARLY_PRINTK
12962 .globl early_idt_handlers
12963 early_idt_handlers:
12964@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12965 #endif /* EARLY_PRINTK */
12966 1: hlt
12967 jmp 1b
12968+ .previous
12969
12970 #ifdef CONFIG_EARLY_PRINTK
12971+ __INITDATA
12972 early_recursion_flag:
12973 .long 0
12974+ .previous
12975
12976+ .section .rodata,"a",@progbits
12977 early_idt_msg:
12978 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12979 early_idt_ripmsg:
12980 .asciz "RIP %s\n"
12981-#endif /* CONFIG_EARLY_PRINTK */
12982 .previous
12983+#endif /* CONFIG_EARLY_PRINTK */
12984
12985+ .section .rodata,"a",@progbits
12986 #define NEXT_PAGE(name) \
12987 .balign PAGE_SIZE; \
12988 ENTRY(name)
12989@@ -338,7 +340,6 @@ ENTRY(name)
12990 i = i + 1 ; \
12991 .endr
12992
12993- .data
12994 /*
12995 * This default setting generates an ident mapping at address 0x100000
12996 * and a mapping for the kernel that precisely maps virtual address
12997@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12998 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12999 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13000 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13001+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13002+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13003+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13004+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13005 .org init_level4_pgt + L4_START_KERNEL*8, 0
13006 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13007 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13008
13009+#ifdef CONFIG_PAX_PER_CPU_PGD
13010+NEXT_PAGE(cpu_pgd)
13011+ .rept NR_CPUS
13012+ .fill 512,8,0
13013+ .endr
13014+#endif
13015+
13016 NEXT_PAGE(level3_ident_pgt)
13017 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13018+#ifdef CONFIG_XEN
13019 .fill 511,8,0
13020+#else
13021+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13022+ .fill 510,8,0
13023+#endif
13024+
13025+NEXT_PAGE(level3_vmalloc_pgt)
13026+ .fill 512,8,0
13027+
13028+NEXT_PAGE(level3_vmemmap_pgt)
13029+ .fill L3_VMEMMAP_START,8,0
13030+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13031
13032 NEXT_PAGE(level3_kernel_pgt)
13033 .fill L3_START_KERNEL,8,0
13034@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13035 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13036 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13037
13038+NEXT_PAGE(level2_vmemmap_pgt)
13039+ .fill 512,8,0
13040+
13041 NEXT_PAGE(level2_fixmap_pgt)
13042- .fill 506,8,0
13043- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13044- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13045- .fill 5,8,0
13046+ .fill 507,8,0
13047+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13048+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13049+ .fill 4,8,0
13050
13051-NEXT_PAGE(level1_fixmap_pgt)
13052+NEXT_PAGE(level1_vsyscall_pgt)
13053 .fill 512,8,0
13054
13055-NEXT_PAGE(level2_ident_pgt)
13056- /* Since I easily can, map the first 1G.
13057+ /* Since I easily can, map the first 2G.
13058 * Don't set NX because code runs from these pages.
13059 */
13060- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13061+NEXT_PAGE(level2_ident_pgt)
13062+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13063
13064 NEXT_PAGE(level2_kernel_pgt)
13065 /*
13066@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13067 * If you want to increase this then increase MODULES_VADDR
13068 * too.)
13069 */
13070- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13071- KERNEL_IMAGE_SIZE/PMD_SIZE)
13072-
13073-NEXT_PAGE(level2_spare_pgt)
13074- .fill 512, 8, 0
13075+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13076
13077 #undef PMDS
13078 #undef NEXT_PAGE
13079
13080- .data
13081+ .align PAGE_SIZE
13082+ENTRY(cpu_gdt_table)
13083+ .rept NR_CPUS
13084+ .quad 0x0000000000000000 /* NULL descriptor */
13085+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13086+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13087+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13088+ .quad 0x00cffb000000ffff /* __USER32_CS */
13089+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13090+ .quad 0x00affb000000ffff /* __USER_CS */
13091+
13092+#ifdef CONFIG_PAX_KERNEXEC
13093+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13094+#else
13095+ .quad 0x0 /* unused */
13096+#endif
13097+
13098+ .quad 0,0 /* TSS */
13099+ .quad 0,0 /* LDT */
13100+ .quad 0,0,0 /* three TLS descriptors */
13101+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13102+ /* asm/segment.h:GDT_ENTRIES must match this */
13103+
13104+ /* zero the remaining page */
13105+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13106+ .endr
13107+
13108 .align 16
13109 .globl early_gdt_descr
13110 early_gdt_descr:
13111 .word GDT_ENTRIES*8-1
13112 early_gdt_descr_base:
13113- .quad INIT_PER_CPU_VAR(gdt_page)
13114+ .quad cpu_gdt_table
13115
13116 ENTRY(phys_base)
13117 /* This must match the first entry in level2_kernel_pgt */
13118 .quad 0x0000000000000000
13119
13120 #include "../../x86/xen/xen-head.S"
13121-
13122- .section .bss, "aw", @nobits
13123+
13124+ .section .rodata,"a",@progbits
13125 .align L1_CACHE_BYTES
13126 ENTRY(idt_table)
13127- .skip IDT_ENTRIES * 16
13128+ .fill 512,8,0
13129
13130 __PAGE_ALIGNED_BSS
13131 .align PAGE_SIZE
13132diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13133--- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13134+++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13135@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13136 EXPORT_SYMBOL(cmpxchg8b_emu);
13137 #endif
13138
13139+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13140+
13141 /* Networking helper routines. */
13142 EXPORT_SYMBOL(csum_partial_copy_generic);
13143+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13144+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13145
13146 EXPORT_SYMBOL(__get_user_1);
13147 EXPORT_SYMBOL(__get_user_2);
13148@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13149
13150 EXPORT_SYMBOL(csum_partial);
13151 EXPORT_SYMBOL(empty_zero_page);
13152+
13153+#ifdef CONFIG_PAX_KERNEXEC
13154+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13155+#endif
13156diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13157--- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13158+++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13159@@ -210,7 +210,7 @@ spurious_8259A_irq:
13160 "spurious 8259A interrupt: IRQ%d.\n", irq);
13161 spurious_irq_mask |= irqmask;
13162 }
13163- atomic_inc(&irq_err_count);
13164+ atomic_inc_unchecked(&irq_err_count);
13165 /*
13166 * Theoretically we do not have to handle this IRQ,
13167 * but in Linux this does not cause problems and is
13168diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13169--- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13170+++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13171@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13172 * way process stacks are handled. This is done by having a special
13173 * "init_task" linker map entry..
13174 */
13175-union thread_union init_thread_union __init_task_data =
13176- { INIT_THREAD_INFO(init_task) };
13177+union thread_union init_thread_union __init_task_data;
13178
13179 /*
13180 * Initial task structure.
13181@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13182 * section. Since TSS's are completely CPU-local, we want them
13183 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13184 */
13185-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13186-
13187+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13188+EXPORT_SYMBOL(init_tss);
13189diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13190--- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13191+++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13192@@ -6,6 +6,7 @@
13193 #include <linux/sched.h>
13194 #include <linux/kernel.h>
13195 #include <linux/capability.h>
13196+#include <linux/security.h>
13197 #include <linux/errno.h>
13198 #include <linux/types.h>
13199 #include <linux/ioport.h>
13200@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13201
13202 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13203 return -EINVAL;
13204+#ifdef CONFIG_GRKERNSEC_IO
13205+ if (turn_on && grsec_disable_privio) {
13206+ gr_handle_ioperm();
13207+ return -EPERM;
13208+ }
13209+#endif
13210 if (turn_on && !capable(CAP_SYS_RAWIO))
13211 return -EPERM;
13212
13213@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13214 * because the ->io_bitmap_max value must match the bitmap
13215 * contents:
13216 */
13217- tss = &per_cpu(init_tss, get_cpu());
13218+ tss = init_tss + get_cpu();
13219
13220 if (turn_on)
13221 bitmap_clear(t->io_bitmap_ptr, from, num);
13222@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13223 return -EINVAL;
13224 /* Trying to gain more privileges? */
13225 if (level > old) {
13226+#ifdef CONFIG_GRKERNSEC_IO
13227+ if (grsec_disable_privio) {
13228+ gr_handle_iopl();
13229+ return -EPERM;
13230+ }
13231+#endif
13232 if (!capable(CAP_SYS_RAWIO))
13233 return -EPERM;
13234 }
13235diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13236--- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13237+++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13238@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13239 __asm__ __volatile__("andl %%esp,%0" :
13240 "=r" (sp) : "0" (THREAD_SIZE - 1));
13241
13242- return sp < (sizeof(struct thread_info) + STACK_WARN);
13243+ return sp < STACK_WARN;
13244 }
13245
13246 static void print_stack_overflow(void)
13247@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13248 * per-CPU IRQ handling contexts (thread information and stack)
13249 */
13250 union irq_ctx {
13251- struct thread_info tinfo;
13252- u32 stack[THREAD_SIZE/sizeof(u32)];
13253+ unsigned long previous_esp;
13254+ u32 stack[THREAD_SIZE/sizeof(u32)];
13255 } __attribute__((aligned(THREAD_SIZE)));
13256
13257 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13258@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13259 static inline int
13260 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13261 {
13262- union irq_ctx *curctx, *irqctx;
13263+ union irq_ctx *irqctx;
13264 u32 *isp, arg1, arg2;
13265
13266- curctx = (union irq_ctx *) current_thread_info();
13267 irqctx = __this_cpu_read(hardirq_ctx);
13268
13269 /*
13270@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13271 * handler) we can't do that and just have to keep using the
13272 * current stack (which is the irq stack already after all)
13273 */
13274- if (unlikely(curctx == irqctx))
13275+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13276 return 0;
13277
13278 /* build the stack frame on the IRQ stack */
13279- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13280- irqctx->tinfo.task = curctx->tinfo.task;
13281- irqctx->tinfo.previous_esp = current_stack_pointer;
13282+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13283+ irqctx->previous_esp = current_stack_pointer;
13284
13285- /*
13286- * Copy the softirq bits in preempt_count so that the
13287- * softirq checks work in the hardirq context.
13288- */
13289- irqctx->tinfo.preempt_count =
13290- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13291- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13292+#ifdef CONFIG_PAX_MEMORY_UDEREF
13293+ __set_fs(MAKE_MM_SEG(0));
13294+#endif
13295
13296 if (unlikely(overflow))
13297 call_on_stack(print_stack_overflow, isp);
13298@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13299 : "0" (irq), "1" (desc), "2" (isp),
13300 "D" (desc->handle_irq)
13301 : "memory", "cc", "ecx");
13302+
13303+#ifdef CONFIG_PAX_MEMORY_UDEREF
13304+ __set_fs(current_thread_info()->addr_limit);
13305+#endif
13306+
13307 return 1;
13308 }
13309
13310@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13311 */
13312 void __cpuinit irq_ctx_init(int cpu)
13313 {
13314- union irq_ctx *irqctx;
13315-
13316 if (per_cpu(hardirq_ctx, cpu))
13317 return;
13318
13319- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13320- THREAD_FLAGS,
13321- THREAD_ORDER));
13322- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13323- irqctx->tinfo.cpu = cpu;
13324- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13325- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13326-
13327- per_cpu(hardirq_ctx, cpu) = irqctx;
13328-
13329- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13330- THREAD_FLAGS,
13331- THREAD_ORDER));
13332- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13333- irqctx->tinfo.cpu = cpu;
13334- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13335-
13336- per_cpu(softirq_ctx, cpu) = irqctx;
13337+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13338+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13339
13340 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13341 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13342@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13343 asmlinkage void do_softirq(void)
13344 {
13345 unsigned long flags;
13346- struct thread_info *curctx;
13347 union irq_ctx *irqctx;
13348 u32 *isp;
13349
13350@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13351 local_irq_save(flags);
13352
13353 if (local_softirq_pending()) {
13354- curctx = current_thread_info();
13355 irqctx = __this_cpu_read(softirq_ctx);
13356- irqctx->tinfo.task = curctx->task;
13357- irqctx->tinfo.previous_esp = current_stack_pointer;
13358+ irqctx->previous_esp = current_stack_pointer;
13359
13360 /* build the stack frame on the softirq stack */
13361- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13362+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13363+
13364+#ifdef CONFIG_PAX_MEMORY_UDEREF
13365+ __set_fs(MAKE_MM_SEG(0));
13366+#endif
13367
13368 call_on_stack(__do_softirq, isp);
13369+
13370+#ifdef CONFIG_PAX_MEMORY_UDEREF
13371+ __set_fs(current_thread_info()->addr_limit);
13372+#endif
13373+
13374 /*
13375 * Shouldn't happen, we returned above if in_interrupt():
13376 */
13377diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13378--- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13379+++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13380@@ -17,7 +17,7 @@
13381 #include <asm/mce.h>
13382 #include <asm/hw_irq.h>
13383
13384-atomic_t irq_err_count;
13385+atomic_unchecked_t irq_err_count;
13386
13387 /* Function pointer for generic interrupt vector handling */
13388 void (*x86_platform_ipi_callback)(void) = NULL;
13389@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13390 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13391 seq_printf(p, " Machine check polls\n");
13392 #endif
13393- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13394+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13395 #if defined(CONFIG_X86_IO_APIC)
13396- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13397+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13398 #endif
13399 return 0;
13400 }
13401@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13402
13403 u64 arch_irq_stat(void)
13404 {
13405- u64 sum = atomic_read(&irq_err_count);
13406+ u64 sum = atomic_read_unchecked(&irq_err_count);
13407
13408 #ifdef CONFIG_X86_IO_APIC
13409- sum += atomic_read(&irq_mis_count);
13410+ sum += atomic_read_unchecked(&irq_mis_count);
13411 #endif
13412 return sum;
13413 }
13414diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13415--- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13416+++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13417@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13418 #ifdef CONFIG_X86_32
13419 switch (regno) {
13420 case GDB_SS:
13421- if (!user_mode_vm(regs))
13422+ if (!user_mode(regs))
13423 *(unsigned long *)mem = __KERNEL_DS;
13424 break;
13425 case GDB_SP:
13426- if (!user_mode_vm(regs))
13427+ if (!user_mode(regs))
13428 *(unsigned long *)mem = kernel_stack_pointer(regs);
13429 break;
13430 case GDB_GS:
13431@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13432 case 'k':
13433 /* clear the trace bit */
13434 linux_regs->flags &= ~X86_EFLAGS_TF;
13435- atomic_set(&kgdb_cpu_doing_single_step, -1);
13436+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13437
13438 /* set the trace bit if we're stepping */
13439 if (remcomInBuffer[0] == 's') {
13440 linux_regs->flags |= X86_EFLAGS_TF;
13441- atomic_set(&kgdb_cpu_doing_single_step,
13442+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13443 raw_smp_processor_id());
13444 }
13445
13446@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13447 return NOTIFY_DONE;
13448
13449 case DIE_DEBUG:
13450- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13451+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13452 if (user_mode(regs))
13453 return single_step_cont(regs, args);
13454 break;
13455diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13456--- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13457+++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13458@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13459 } __attribute__((packed)) *insn;
13460
13461 insn = (struct __arch_relative_insn *)from;
13462+
13463+ pax_open_kernel();
13464 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13465 insn->op = op;
13466+ pax_close_kernel();
13467 }
13468
13469 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13470@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13471 kprobe_opcode_t opcode;
13472 kprobe_opcode_t *orig_opcodes = opcodes;
13473
13474- if (search_exception_tables((unsigned long)opcodes))
13475+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13476 return 0; /* Page fault may occur on this address. */
13477
13478 retry:
13479@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13480 }
13481 }
13482 insn_get_length(&insn);
13483+ pax_open_kernel();
13484 memcpy(dest, insn.kaddr, insn.length);
13485+ pax_close_kernel();
13486
13487 #ifdef CONFIG_X86_64
13488 if (insn_rip_relative(&insn)) {
13489@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13490 (u8 *) dest;
13491 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13492 disp = (u8 *) dest + insn_offset_displacement(&insn);
13493+ pax_open_kernel();
13494 *(s32 *) disp = (s32) newdisp;
13495+ pax_close_kernel();
13496 }
13497 #endif
13498 return insn.length;
13499@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13500 */
13501 __copy_instruction(p->ainsn.insn, p->addr, 0);
13502
13503- if (can_boost(p->addr))
13504+ if (can_boost(ktla_ktva(p->addr)))
13505 p->ainsn.boostable = 0;
13506 else
13507 p->ainsn.boostable = -1;
13508
13509- p->opcode = *p->addr;
13510+ p->opcode = *(ktla_ktva(p->addr));
13511 }
13512
13513 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13514@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13515 * nor set current_kprobe, because it doesn't use single
13516 * stepping.
13517 */
13518- regs->ip = (unsigned long)p->ainsn.insn;
13519+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13520 preempt_enable_no_resched();
13521 return;
13522 }
13523@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13524 if (p->opcode == BREAKPOINT_INSTRUCTION)
13525 regs->ip = (unsigned long)p->addr;
13526 else
13527- regs->ip = (unsigned long)p->ainsn.insn;
13528+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13529 }
13530
13531 /*
13532@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13533 setup_singlestep(p, regs, kcb, 0);
13534 return 1;
13535 }
13536- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13537+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13538 /*
13539 * The breakpoint instruction was removed right
13540 * after we hit it. Another cpu has removed
13541@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13542 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13543 {
13544 unsigned long *tos = stack_addr(regs);
13545- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13546+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13547 unsigned long orig_ip = (unsigned long)p->addr;
13548 kprobe_opcode_t *insn = p->ainsn.insn;
13549
13550@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13551 struct die_args *args = data;
13552 int ret = NOTIFY_DONE;
13553
13554- if (args->regs && user_mode_vm(args->regs))
13555+ if (args->regs && user_mode(args->regs))
13556 return ret;
13557
13558 switch (val) {
13559@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13560 * Verify if the address gap is in 2GB range, because this uses
13561 * a relative jump.
13562 */
13563- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13564+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13565 if (abs(rel) > 0x7fffffff)
13566 return -ERANGE;
13567
13568@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13569 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13570
13571 /* Set probe function call */
13572- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13573+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13574
13575 /* Set returning jmp instruction at the tail of out-of-line buffer */
13576 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13577- (u8 *)op->kp.addr + op->optinsn.size);
13578+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13579
13580 flush_icache_range((unsigned long) buf,
13581 (unsigned long) buf + TMPL_END_IDX +
13582@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13583 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13584
13585 /* Backup instructions which will be replaced by jump address */
13586- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13587+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13588 RELATIVE_ADDR_SIZE);
13589
13590 insn_buf[0] = RELATIVEJUMP_OPCODE;
13591diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13592--- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13593+++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13594@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13595 if (reload) {
13596 #ifdef CONFIG_SMP
13597 preempt_disable();
13598- load_LDT(pc);
13599+ load_LDT_nolock(pc);
13600 if (!cpumask_equal(mm_cpumask(current->mm),
13601 cpumask_of(smp_processor_id())))
13602 smp_call_function(flush_ldt, current->mm, 1);
13603 preempt_enable();
13604 #else
13605- load_LDT(pc);
13606+ load_LDT_nolock(pc);
13607 #endif
13608 }
13609 if (oldsize) {
13610@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13611 return err;
13612
13613 for (i = 0; i < old->size; i++)
13614- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13615+ write_ldt_entry(new->ldt, i, old->ldt + i);
13616 return 0;
13617 }
13618
13619@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13620 retval = copy_ldt(&mm->context, &old_mm->context);
13621 mutex_unlock(&old_mm->context.lock);
13622 }
13623+
13624+ if (tsk == current) {
13625+ mm->context.vdso = 0;
13626+
13627+#ifdef CONFIG_X86_32
13628+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13629+ mm->context.user_cs_base = 0UL;
13630+ mm->context.user_cs_limit = ~0UL;
13631+
13632+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13633+ cpus_clear(mm->context.cpu_user_cs_mask);
13634+#endif
13635+
13636+#endif
13637+#endif
13638+
13639+ }
13640+
13641 return retval;
13642 }
13643
13644@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13645 }
13646 }
13647
13648+#ifdef CONFIG_PAX_SEGMEXEC
13649+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13650+ error = -EINVAL;
13651+ goto out_unlock;
13652+ }
13653+#endif
13654+
13655 fill_ldt(&ldt, &ldt_info);
13656 if (oldmode)
13657 ldt.avl = 0;
13658diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13659--- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13660+++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13661@@ -27,7 +27,7 @@
13662 #include <asm/cacheflush.h>
13663 #include <asm/debugreg.h>
13664
13665-static void set_idt(void *newidt, __u16 limit)
13666+static void set_idt(struct desc_struct *newidt, __u16 limit)
13667 {
13668 struct desc_ptr curidt;
13669
13670@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13671 }
13672
13673
13674-static void set_gdt(void *newgdt, __u16 limit)
13675+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13676 {
13677 struct desc_ptr curgdt;
13678
13679@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13680 }
13681
13682 control_page = page_address(image->control_code_page);
13683- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13684+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13685
13686 relocate_kernel_ptr = control_page;
13687 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13688diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13689--- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13690+++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13691@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13692
13693 static int get_ucode_user(void *to, const void *from, size_t n)
13694 {
13695- return copy_from_user(to, from, n);
13696+ return copy_from_user(to, (__force const void __user *)from, n);
13697 }
13698
13699 static enum ucode_state
13700 request_microcode_user(int cpu, const void __user *buf, size_t size)
13701 {
13702- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13703+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13704 }
13705
13706 static void microcode_fini_cpu(int cpu)
13707diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13708--- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13709+++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13710@@ -35,21 +35,66 @@
13711 #define DEBUGP(fmt...)
13712 #endif
13713
13714-void *module_alloc(unsigned long size)
13715+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13716 {
13717 if (PAGE_ALIGN(size) > MODULES_LEN)
13718 return NULL;
13719 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13720- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13721+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13722 -1, __builtin_return_address(0));
13723 }
13724
13725+void *module_alloc(unsigned long size)
13726+{
13727+
13728+#ifdef CONFIG_PAX_KERNEXEC
13729+ return __module_alloc(size, PAGE_KERNEL);
13730+#else
13731+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13732+#endif
13733+
13734+}
13735+
13736 /* Free memory returned from module_alloc */
13737 void module_free(struct module *mod, void *module_region)
13738 {
13739 vfree(module_region);
13740 }
13741
13742+#ifdef CONFIG_PAX_KERNEXEC
13743+#ifdef CONFIG_X86_32
13744+void *module_alloc_exec(unsigned long size)
13745+{
13746+ struct vm_struct *area;
13747+
13748+ if (size == 0)
13749+ return NULL;
13750+
13751+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13752+ return area ? area->addr : NULL;
13753+}
13754+EXPORT_SYMBOL(module_alloc_exec);
13755+
13756+void module_free_exec(struct module *mod, void *module_region)
13757+{
13758+ vunmap(module_region);
13759+}
13760+EXPORT_SYMBOL(module_free_exec);
13761+#else
13762+void module_free_exec(struct module *mod, void *module_region)
13763+{
13764+ module_free(mod, module_region);
13765+}
13766+EXPORT_SYMBOL(module_free_exec);
13767+
13768+void *module_alloc_exec(unsigned long size)
13769+{
13770+ return __module_alloc(size, PAGE_KERNEL_RX);
13771+}
13772+EXPORT_SYMBOL(module_alloc_exec);
13773+#endif
13774+#endif
13775+
13776 /* We don't need anything special. */
13777 int module_frob_arch_sections(Elf_Ehdr *hdr,
13778 Elf_Shdr *sechdrs,
13779@@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13780 unsigned int i;
13781 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13782 Elf32_Sym *sym;
13783- uint32_t *location;
13784+ uint32_t *plocation, location;
13785
13786 DEBUGP("Applying relocate section %u to %u\n", relsec,
13787 sechdrs[relsec].sh_info);
13788 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13789 /* This is where to make the change */
13790- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13791- + rel[i].r_offset;
13792+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13793+ location = (uint32_t)plocation;
13794+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13795+ plocation = ktla_ktva((void *)plocation);
13796 /* This is the symbol it is referring to. Note that all
13797 undefined symbols have been resolved. */
13798 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13799@@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13800 switch (ELF32_R_TYPE(rel[i].r_info)) {
13801 case R_386_32:
13802 /* We add the value into the location given */
13803- *location += sym->st_value;
13804+ pax_open_kernel();
13805+ *plocation += sym->st_value;
13806+ pax_close_kernel();
13807 break;
13808 case R_386_PC32:
13809 /* Add the value, subtract its postition */
13810- *location += sym->st_value - (uint32_t)location;
13811+ pax_open_kernel();
13812+ *plocation += sym->st_value - location;
13813+ pax_close_kernel();
13814 break;
13815 default:
13816 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13817@@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13818 case R_X86_64_NONE:
13819 break;
13820 case R_X86_64_64:
13821+ pax_open_kernel();
13822 *(u64 *)loc = val;
13823+ pax_close_kernel();
13824 break;
13825 case R_X86_64_32:
13826+ pax_open_kernel();
13827 *(u32 *)loc = val;
13828+ pax_close_kernel();
13829 if (val != *(u32 *)loc)
13830 goto overflow;
13831 break;
13832 case R_X86_64_32S:
13833+ pax_open_kernel();
13834 *(s32 *)loc = val;
13835+ pax_close_kernel();
13836 if ((s64)val != *(s32 *)loc)
13837 goto overflow;
13838 break;
13839 case R_X86_64_PC32:
13840 val -= (u64)loc;
13841+ pax_open_kernel();
13842 *(u32 *)loc = val;
13843+ pax_close_kernel();
13844+
13845 #if 0
13846 if ((s64)val != *(s32 *)loc)
13847 goto overflow;
13848diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13849--- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13850+++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13851@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13852 {
13853 return x;
13854 }
13855+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13856+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13857+#endif
13858
13859 void __init default_banner(void)
13860 {
13861@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13862 * corresponding structure. */
13863 static void *get_call_destination(u8 type)
13864 {
13865- struct paravirt_patch_template tmpl = {
13866+ const struct paravirt_patch_template tmpl = {
13867 .pv_init_ops = pv_init_ops,
13868 .pv_time_ops = pv_time_ops,
13869 .pv_cpu_ops = pv_cpu_ops,
13870@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13871 .pv_lock_ops = pv_lock_ops,
13872 #endif
13873 };
13874+
13875+ pax_track_stack();
13876+
13877 return *((void **)&tmpl + type);
13878 }
13879
13880@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13881 if (opfunc == NULL)
13882 /* If there's no function, patch it with a ud2a (BUG) */
13883 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13884- else if (opfunc == _paravirt_nop)
13885+ else if (opfunc == (void *)_paravirt_nop)
13886 /* If the operation is a nop, then nop the callsite */
13887 ret = paravirt_patch_nop();
13888
13889 /* identity functions just return their single argument */
13890- else if (opfunc == _paravirt_ident_32)
13891+ else if (opfunc == (void *)_paravirt_ident_32)
13892 ret = paravirt_patch_ident_32(insnbuf, len);
13893- else if (opfunc == _paravirt_ident_64)
13894+ else if (opfunc == (void *)_paravirt_ident_64)
13895 ret = paravirt_patch_ident_64(insnbuf, len);
13896+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13897+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13898+ ret = paravirt_patch_ident_64(insnbuf, len);
13899+#endif
13900
13901 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13902 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13903@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13904 if (insn_len > len || start == NULL)
13905 insn_len = len;
13906 else
13907- memcpy(insnbuf, start, insn_len);
13908+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13909
13910 return insn_len;
13911 }
13912@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13913 preempt_enable();
13914 }
13915
13916-struct pv_info pv_info = {
13917+struct pv_info pv_info __read_only = {
13918 .name = "bare hardware",
13919 .paravirt_enabled = 0,
13920 .kernel_rpl = 0,
13921 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13922 };
13923
13924-struct pv_init_ops pv_init_ops = {
13925+struct pv_init_ops pv_init_ops __read_only = {
13926 .patch = native_patch,
13927 };
13928
13929-struct pv_time_ops pv_time_ops = {
13930+struct pv_time_ops pv_time_ops __read_only = {
13931 .sched_clock = native_sched_clock,
13932 };
13933
13934-struct pv_irq_ops pv_irq_ops = {
13935+struct pv_irq_ops pv_irq_ops __read_only = {
13936 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13937 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13938 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13939@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13940 #endif
13941 };
13942
13943-struct pv_cpu_ops pv_cpu_ops = {
13944+struct pv_cpu_ops pv_cpu_ops __read_only = {
13945 .cpuid = native_cpuid,
13946 .get_debugreg = native_get_debugreg,
13947 .set_debugreg = native_set_debugreg,
13948@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13949 .end_context_switch = paravirt_nop,
13950 };
13951
13952-struct pv_apic_ops pv_apic_ops = {
13953+struct pv_apic_ops pv_apic_ops __read_only = {
13954 #ifdef CONFIG_X86_LOCAL_APIC
13955 .startup_ipi_hook = paravirt_nop,
13956 #endif
13957 };
13958
13959-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13960+#ifdef CONFIG_X86_32
13961+#ifdef CONFIG_X86_PAE
13962+/* 64-bit pagetable entries */
13963+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13964+#else
13965 /* 32-bit pagetable entries */
13966 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13967+#endif
13968 #else
13969 /* 64-bit pagetable entries */
13970 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13971 #endif
13972
13973-struct pv_mmu_ops pv_mmu_ops = {
13974+struct pv_mmu_ops pv_mmu_ops __read_only = {
13975
13976 .read_cr2 = native_read_cr2,
13977 .write_cr2 = native_write_cr2,
13978@@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13979 },
13980
13981 .set_fixmap = native_set_fixmap,
13982+
13983+#ifdef CONFIG_PAX_KERNEXEC
13984+ .pax_open_kernel = native_pax_open_kernel,
13985+ .pax_close_kernel = native_pax_close_kernel,
13986+#endif
13987+
13988 };
13989
13990 EXPORT_SYMBOL_GPL(pv_time_ops);
13991diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
13992--- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
13993+++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
13994@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13995 arch_spin_lock(lock);
13996 }
13997
13998-struct pv_lock_ops pv_lock_ops = {
13999+struct pv_lock_ops pv_lock_ops __read_only = {
14000 #ifdef CONFIG_SMP
14001 .spin_is_locked = __ticket_spin_is_locked,
14002 .spin_is_contended = __ticket_spin_is_contended,
14003diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14004--- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14005+++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14006@@ -2,7 +2,7 @@
14007 #include <asm/iommu_table.h>
14008 #include <linux/string.h>
14009 #include <linux/kallsyms.h>
14010-
14011+#include <linux/sched.h>
14012
14013 #define DEBUG 1
14014
14015@@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14016 char sym_p[KSYM_SYMBOL_LEN];
14017 char sym_q[KSYM_SYMBOL_LEN];
14018
14019+ pax_track_stack();
14020+
14021 /* Simple cyclic dependency checker. */
14022 for (p = start; p < finish; p++) {
14023 q = find_dependents_of(start, finish, p);
14024diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14025--- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14026+++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14027@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14028 unsigned long thread_saved_pc(struct task_struct *tsk)
14029 {
14030 return ((unsigned long *)tsk->thread.sp)[3];
14031+//XXX return tsk->thread.eip;
14032 }
14033
14034 #ifndef CONFIG_SMP
14035@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14036 unsigned long sp;
14037 unsigned short ss, gs;
14038
14039- if (user_mode_vm(regs)) {
14040+ if (user_mode(regs)) {
14041 sp = regs->sp;
14042 ss = regs->ss & 0xffff;
14043- gs = get_user_gs(regs);
14044 } else {
14045 sp = kernel_stack_pointer(regs);
14046 savesegment(ss, ss);
14047- savesegment(gs, gs);
14048 }
14049+ gs = get_user_gs(regs);
14050
14051 show_regs_common();
14052
14053@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14054 struct task_struct *tsk;
14055 int err;
14056
14057- childregs = task_pt_regs(p);
14058+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14059 *childregs = *regs;
14060 childregs->ax = 0;
14061 childregs->sp = sp;
14062
14063 p->thread.sp = (unsigned long) childregs;
14064 p->thread.sp0 = (unsigned long) (childregs+1);
14065+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14066
14067 p->thread.ip = (unsigned long) ret_from_fork;
14068
14069@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14070 struct thread_struct *prev = &prev_p->thread,
14071 *next = &next_p->thread;
14072 int cpu = smp_processor_id();
14073- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14074+ struct tss_struct *tss = init_tss + cpu;
14075 bool preload_fpu;
14076
14077 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14078@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14079 */
14080 lazy_save_gs(prev->gs);
14081
14082+#ifdef CONFIG_PAX_MEMORY_UDEREF
14083+ __set_fs(task_thread_info(next_p)->addr_limit);
14084+#endif
14085+
14086 /*
14087 * Load the per-thread Thread-Local Storage descriptor.
14088 */
14089@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14090 */
14091 arch_end_context_switch(next_p);
14092
14093+ percpu_write(current_task, next_p);
14094+ percpu_write(current_tinfo, &next_p->tinfo);
14095+
14096 if (preload_fpu)
14097 __math_state_restore();
14098
14099@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14100 if (prev->gs | next->gs)
14101 lazy_load_gs(next->gs);
14102
14103- percpu_write(current_task, next_p);
14104-
14105 return prev_p;
14106 }
14107
14108@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14109 } while (count++ < 16);
14110 return 0;
14111 }
14112-
14113diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14114--- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14115+++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14116@@ -87,7 +87,7 @@ static void __exit_idle(void)
14117 void exit_idle(void)
14118 {
14119 /* idle loop has pid 0 */
14120- if (current->pid)
14121+ if (task_pid_nr(current))
14122 return;
14123 __exit_idle();
14124 }
14125@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14126 struct pt_regs *childregs;
14127 struct task_struct *me = current;
14128
14129- childregs = ((struct pt_regs *)
14130- (THREAD_SIZE + task_stack_page(p))) - 1;
14131+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14132 *childregs = *regs;
14133
14134 childregs->ax = 0;
14135@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14136 p->thread.sp = (unsigned long) childregs;
14137 p->thread.sp0 = (unsigned long) (childregs+1);
14138 p->thread.usersp = me->thread.usersp;
14139+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14140
14141 set_tsk_thread_flag(p, TIF_FORK);
14142
14143@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14144 struct thread_struct *prev = &prev_p->thread;
14145 struct thread_struct *next = &next_p->thread;
14146 int cpu = smp_processor_id();
14147- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14148+ struct tss_struct *tss = init_tss + cpu;
14149 unsigned fsindex, gsindex;
14150 bool preload_fpu;
14151
14152@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14153 prev->usersp = percpu_read(old_rsp);
14154 percpu_write(old_rsp, next->usersp);
14155 percpu_write(current_task, next_p);
14156+ percpu_write(current_tinfo, &next_p->tinfo);
14157
14158- percpu_write(kernel_stack,
14159- (unsigned long)task_stack_page(next_p) +
14160- THREAD_SIZE - KERNEL_STACK_OFFSET);
14161+ percpu_write(kernel_stack, next->sp0);
14162
14163 /*
14164 * Now maybe reload the debug registers and handle I/O bitmaps
14165@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14166 if (!p || p == current || p->state == TASK_RUNNING)
14167 return 0;
14168 stack = (unsigned long)task_stack_page(p);
14169- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14170+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14171 return 0;
14172 fp = *(u64 *)(p->thread.sp);
14173 do {
14174- if (fp < (unsigned long)stack ||
14175- fp >= (unsigned long)stack+THREAD_SIZE)
14176+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14177 return 0;
14178 ip = *(u64 *)(fp+8);
14179 if (!in_sched_functions(ip))
14180diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14181--- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14182+++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14183@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14184
14185 void free_thread_info(struct thread_info *ti)
14186 {
14187- free_thread_xstate(ti->task);
14188 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14189 }
14190
14191+static struct kmem_cache *task_struct_cachep;
14192+
14193 void arch_task_cache_init(void)
14194 {
14195- task_xstate_cachep =
14196- kmem_cache_create("task_xstate", xstate_size,
14197+ /* create a slab on which task_structs can be allocated */
14198+ task_struct_cachep =
14199+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14200+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14201+
14202+ task_xstate_cachep =
14203+ kmem_cache_create("task_xstate", xstate_size,
14204 __alignof__(union thread_xstate),
14205- SLAB_PANIC | SLAB_NOTRACK, NULL);
14206+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14207+}
14208+
14209+struct task_struct *alloc_task_struct_node(int node)
14210+{
14211+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14212+}
14213+
14214+void free_task_struct(struct task_struct *task)
14215+{
14216+ free_thread_xstate(task);
14217+ kmem_cache_free(task_struct_cachep, task);
14218 }
14219
14220 /*
14221@@ -70,7 +87,7 @@ void exit_thread(void)
14222 unsigned long *bp = t->io_bitmap_ptr;
14223
14224 if (bp) {
14225- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14226+ struct tss_struct *tss = init_tss + get_cpu();
14227
14228 t->io_bitmap_ptr = NULL;
14229 clear_thread_flag(TIF_IO_BITMAP);
14230@@ -106,7 +123,7 @@ void show_regs_common(void)
14231
14232 printk(KERN_CONT "\n");
14233 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14234- current->pid, current->comm, print_tainted(),
14235+ task_pid_nr(current), current->comm, print_tainted(),
14236 init_utsname()->release,
14237 (int)strcspn(init_utsname()->version, " "),
14238 init_utsname()->version);
14239@@ -120,6 +137,9 @@ void flush_thread(void)
14240 {
14241 struct task_struct *tsk = current;
14242
14243+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14244+ loadsegment(gs, 0);
14245+#endif
14246 flush_ptrace_hw_breakpoint(tsk);
14247 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14248 /*
14249@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14250 regs.di = (unsigned long) arg;
14251
14252 #ifdef CONFIG_X86_32
14253- regs.ds = __USER_DS;
14254- regs.es = __USER_DS;
14255+ regs.ds = __KERNEL_DS;
14256+ regs.es = __KERNEL_DS;
14257 regs.fs = __KERNEL_PERCPU;
14258- regs.gs = __KERNEL_STACK_CANARY;
14259+ savesegment(gs, regs.gs);
14260 #else
14261 regs.ss = __KERNEL_DS;
14262 #endif
14263@@ -401,7 +421,7 @@ void default_idle(void)
14264 EXPORT_SYMBOL(default_idle);
14265 #endif
14266
14267-void stop_this_cpu(void *dummy)
14268+__noreturn void stop_this_cpu(void *dummy)
14269 {
14270 local_irq_disable();
14271 /*
14272@@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14273 }
14274 early_param("idle", idle_setup);
14275
14276-unsigned long arch_align_stack(unsigned long sp)
14277+#ifdef CONFIG_PAX_RANDKSTACK
14278+asmlinkage void pax_randomize_kstack(void)
14279 {
14280- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14281- sp -= get_random_int() % 8192;
14282- return sp & ~0xf;
14283-}
14284+ struct thread_struct *thread = &current->thread;
14285+ unsigned long time;
14286
14287-unsigned long arch_randomize_brk(struct mm_struct *mm)
14288-{
14289- unsigned long range_end = mm->brk + 0x02000000;
14290- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14291-}
14292+ if (!randomize_va_space)
14293+ return;
14294+
14295+ rdtscl(time);
14296+
14297+ /* P4 seems to return a 0 LSB, ignore it */
14298+#ifdef CONFIG_MPENTIUM4
14299+ time &= 0x3EUL;
14300+ time <<= 2;
14301+#elif defined(CONFIG_X86_64)
14302+ time &= 0xFUL;
14303+ time <<= 4;
14304+#else
14305+ time &= 0x1FUL;
14306+ time <<= 3;
14307+#endif
14308+
14309+ thread->sp0 ^= time;
14310+ load_sp0(init_tss + smp_processor_id(), thread);
14311
14312+#ifdef CONFIG_X86_64
14313+ percpu_write(kernel_stack, thread->sp0);
14314+#endif
14315+}
14316+#endif
14317diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14318--- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14319+++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14320@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14321 unsigned long addr, unsigned long data)
14322 {
14323 int ret;
14324- unsigned long __user *datap = (unsigned long __user *)data;
14325+ unsigned long __user *datap = (__force unsigned long __user *)data;
14326
14327 switch (request) {
14328 /* read the word at location addr in the USER area. */
14329@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14330 if ((int) addr < 0)
14331 return -EIO;
14332 ret = do_get_thread_area(child, addr,
14333- (struct user_desc __user *)data);
14334+ (__force struct user_desc __user *) data);
14335 break;
14336
14337 case PTRACE_SET_THREAD_AREA:
14338 if ((int) addr < 0)
14339 return -EIO;
14340 ret = do_set_thread_area(child, addr,
14341- (struct user_desc __user *)data, 0);
14342+ (__force struct user_desc __user *) data, 0);
14343 break;
14344 #endif
14345
14346@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14347 memset(info, 0, sizeof(*info));
14348 info->si_signo = SIGTRAP;
14349 info->si_code = si_code;
14350- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14351+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14352 }
14353
14354 void user_single_step_siginfo(struct task_struct *tsk,
14355@@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14356 * We must return the syscall number to actually look up in the table.
14357 * This can be -1L to skip running any syscall at all.
14358 */
14359-asmregparm long syscall_trace_enter(struct pt_regs *regs)
14360+long syscall_trace_enter(struct pt_regs *regs)
14361 {
14362 long ret = 0;
14363
14364@@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14365 return ret ?: regs->orig_ax;
14366 }
14367
14368-asmregparm void syscall_trace_leave(struct pt_regs *regs)
14369+void syscall_trace_leave(struct pt_regs *regs)
14370 {
14371 bool step;
14372
14373diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14374--- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14375+++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14376@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14377 return pv_tsc_khz;
14378 }
14379
14380-static atomic64_t last_value = ATOMIC64_INIT(0);
14381+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14382
14383 void pvclock_resume(void)
14384 {
14385- atomic64_set(&last_value, 0);
14386+ atomic64_set_unchecked(&last_value, 0);
14387 }
14388
14389 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14390@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14391 * updating at the same time, and one of them could be slightly behind,
14392 * making the assumption that last_value always go forward fail to hold.
14393 */
14394- last = atomic64_read(&last_value);
14395+ last = atomic64_read_unchecked(&last_value);
14396 do {
14397 if (ret < last)
14398 return last;
14399- last = atomic64_cmpxchg(&last_value, last, ret);
14400+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14401 } while (unlikely(last != ret));
14402
14403 return ret;
14404diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14405--- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14406+++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14407@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14408 EXPORT_SYMBOL(pm_power_off);
14409
14410 static const struct desc_ptr no_idt = {};
14411-static int reboot_mode;
14412+static unsigned short reboot_mode;
14413 enum reboot_type reboot_type = BOOT_KBD;
14414 int reboot_force;
14415
14416@@ -307,13 +307,17 @@ core_initcall(reboot_init);
14417 extern const unsigned char machine_real_restart_asm[];
14418 extern const u64 machine_real_restart_gdt[3];
14419
14420-void machine_real_restart(unsigned int type)
14421+__noreturn void machine_real_restart(unsigned int type)
14422 {
14423 void *restart_va;
14424 unsigned long restart_pa;
14425- void (*restart_lowmem)(unsigned int);
14426+ void (* __noreturn restart_lowmem)(unsigned int);
14427 u64 *lowmem_gdt;
14428
14429+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14430+ struct desc_struct *gdt;
14431+#endif
14432+
14433 local_irq_disable();
14434
14435 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14436@@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14437 boot)". This seems like a fairly standard thing that gets set by
14438 REBOOT.COM programs, and the previous reset routine did this
14439 too. */
14440- *((unsigned short *)0x472) = reboot_mode;
14441+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14442
14443 /* Patch the GDT in the low memory trampoline */
14444 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14445
14446 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14447 restart_pa = virt_to_phys(restart_va);
14448- restart_lowmem = (void (*)(unsigned int))restart_pa;
14449+ restart_lowmem = (void *)restart_pa;
14450
14451 /* GDT[0]: GDT self-pointer */
14452 lowmem_gdt[0] =
14453@@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14454 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14455
14456 /* Jump to the identity-mapped low memory code */
14457+
14458+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14459+ gdt = get_cpu_gdt_table(smp_processor_id());
14460+ pax_open_kernel();
14461+#ifdef CONFIG_PAX_MEMORY_UDEREF
14462+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14463+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14464+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14465+#endif
14466+#ifdef CONFIG_PAX_KERNEXEC
14467+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14468+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14469+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14470+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14471+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14472+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14473+#endif
14474+ pax_close_kernel();
14475+#endif
14476+
14477+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14478+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14479+ unreachable();
14480+#else
14481 restart_lowmem(type);
14482+#endif
14483+
14484 }
14485 #ifdef CONFIG_APM_MODULE
14486 EXPORT_SYMBOL(machine_real_restart);
14487@@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14488 {
14489 }
14490
14491-static void native_machine_emergency_restart(void)
14492+__noreturn static void native_machine_emergency_restart(void)
14493 {
14494 int i;
14495
14496@@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14497 #endif
14498 }
14499
14500-static void __machine_emergency_restart(int emergency)
14501+static __noreturn void __machine_emergency_restart(int emergency)
14502 {
14503 reboot_emergency = emergency;
14504 machine_ops.emergency_restart();
14505 }
14506
14507-static void native_machine_restart(char *__unused)
14508+static __noreturn void native_machine_restart(char *__unused)
14509 {
14510 printk("machine restart\n");
14511
14512@@ -616,7 +646,7 @@ static void native_machine_restart(char
14513 __machine_emergency_restart(0);
14514 }
14515
14516-static void native_machine_halt(void)
14517+static __noreturn void native_machine_halt(void)
14518 {
14519 /* stop other cpus and apics */
14520 machine_shutdown();
14521@@ -627,7 +657,7 @@ static void native_machine_halt(void)
14522 stop_this_cpu(NULL);
14523 }
14524
14525-static void native_machine_power_off(void)
14526+__noreturn static void native_machine_power_off(void)
14527 {
14528 if (pm_power_off) {
14529 if (!reboot_force)
14530@@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14531 }
14532 /* a fallback in case there is no PM info available */
14533 tboot_shutdown(TB_SHUTDOWN_HALT);
14534+ unreachable();
14535 }
14536
14537 struct machine_ops machine_ops = {
14538diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14539--- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14540+++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14541@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14542 * area (640->1Mb) as ram even though it is not.
14543 * take them out.
14544 */
14545- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14546+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14547 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14548 }
14549
14550@@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14551
14552 if (!boot_params.hdr.root_flags)
14553 root_mountflags &= ~MS_RDONLY;
14554- init_mm.start_code = (unsigned long) _text;
14555- init_mm.end_code = (unsigned long) _etext;
14556+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14557+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14558 init_mm.end_data = (unsigned long) _edata;
14559 init_mm.brk = _brk_end;
14560
14561- code_resource.start = virt_to_phys(_text);
14562- code_resource.end = virt_to_phys(_etext)-1;
14563- data_resource.start = virt_to_phys(_etext);
14564+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14565+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14566+ data_resource.start = virt_to_phys(_sdata);
14567 data_resource.end = virt_to_phys(_edata)-1;
14568 bss_resource.start = virt_to_phys(&__bss_start);
14569 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14570diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14571--- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14572+++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14573@@ -21,19 +21,17 @@
14574 #include <asm/cpu.h>
14575 #include <asm/stackprotector.h>
14576
14577-DEFINE_PER_CPU(int, cpu_number);
14578+#ifdef CONFIG_SMP
14579+DEFINE_PER_CPU(unsigned int, cpu_number);
14580 EXPORT_PER_CPU_SYMBOL(cpu_number);
14581+#endif
14582
14583-#ifdef CONFIG_X86_64
14584 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14585-#else
14586-#define BOOT_PERCPU_OFFSET 0
14587-#endif
14588
14589 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14590 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14591
14592-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14593+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14594 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14595 };
14596 EXPORT_SYMBOL(__per_cpu_offset);
14597@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14598 {
14599 #ifdef CONFIG_X86_32
14600 struct desc_struct gdt;
14601+ unsigned long base = per_cpu_offset(cpu);
14602
14603- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14604- 0x2 | DESCTYPE_S, 0x8);
14605- gdt.s = 1;
14606+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14607+ 0x83 | DESCTYPE_S, 0xC);
14608 write_gdt_entry(get_cpu_gdt_table(cpu),
14609 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14610 #endif
14611@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14612 /* alrighty, percpu areas up and running */
14613 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14614 for_each_possible_cpu(cpu) {
14615+#ifdef CONFIG_CC_STACKPROTECTOR
14616+#ifdef CONFIG_X86_32
14617+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14618+#endif
14619+#endif
14620 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14621 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14622 per_cpu(cpu_number, cpu) = cpu;
14623@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14624 */
14625 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14626 #endif
14627+#ifdef CONFIG_CC_STACKPROTECTOR
14628+#ifdef CONFIG_X86_32
14629+ if (!cpu)
14630+ per_cpu(stack_canary.canary, cpu) = canary;
14631+#endif
14632+#endif
14633 /*
14634 * Up to this point, the boot CPU has been using .init.data
14635 * area. Reload any changed state for the boot CPU.
14636diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14637--- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14638+++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14639@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14640 * Align the stack pointer according to the i386 ABI,
14641 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14642 */
14643- sp = ((sp + 4) & -16ul) - 4;
14644+ sp = ((sp - 12) & -16ul) - 4;
14645 #else /* !CONFIG_X86_32 */
14646 sp = round_down(sp, 16) - 8;
14647 #endif
14648@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14649 * Return an always-bogus address instead so we will die with SIGSEGV.
14650 */
14651 if (onsigstack && !likely(on_sig_stack(sp)))
14652- return (void __user *)-1L;
14653+ return (__force void __user *)-1L;
14654
14655 /* save i387 state */
14656 if (used_math() && save_i387_xstate(*fpstate) < 0)
14657- return (void __user *)-1L;
14658+ return (__force void __user *)-1L;
14659
14660 return (void __user *)sp;
14661 }
14662@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14663 }
14664
14665 if (current->mm->context.vdso)
14666- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14667+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14668 else
14669- restorer = &frame->retcode;
14670+ restorer = (void __user *)&frame->retcode;
14671 if (ka->sa.sa_flags & SA_RESTORER)
14672 restorer = ka->sa.sa_restorer;
14673
14674@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14675 * reasons and because gdb uses it as a signature to notice
14676 * signal handler stack frames.
14677 */
14678- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14679+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14680
14681 if (err)
14682 return -EFAULT;
14683@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14684 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14685
14686 /* Set up to return from userspace. */
14687- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14688+ if (current->mm->context.vdso)
14689+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14690+ else
14691+ restorer = (void __user *)&frame->retcode;
14692 if (ka->sa.sa_flags & SA_RESTORER)
14693 restorer = ka->sa.sa_restorer;
14694 put_user_ex(restorer, &frame->pretcode);
14695@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14696 * reasons and because gdb uses it as a signature to notice
14697 * signal handler stack frames.
14698 */
14699- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14700+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14701 } put_user_catch(err);
14702
14703 if (err)
14704@@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14705 int signr;
14706 sigset_t *oldset;
14707
14708+ pax_track_stack();
14709+
14710 /*
14711 * We want the common case to go fast, which is why we may in certain
14712 * cases get here from kernel mode. Just return without doing anything
14713@@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14714 * X86_32: vm86 regs switched out by assembly code before reaching
14715 * here, so testing against kernel CS suffices.
14716 */
14717- if (!user_mode(regs))
14718+ if (!user_mode_novm(regs))
14719 return;
14720
14721 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14722diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14723--- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14724+++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14725@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14726 set_idle_for_cpu(cpu, c_idle.idle);
14727 do_rest:
14728 per_cpu(current_task, cpu) = c_idle.idle;
14729+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14730 #ifdef CONFIG_X86_32
14731 /* Stack for startup_32 can be just as for start_secondary onwards */
14732 irq_ctx_init(cpu);
14733 #else
14734 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14735 initial_gs = per_cpu_offset(cpu);
14736- per_cpu(kernel_stack, cpu) =
14737- (unsigned long)task_stack_page(c_idle.idle) -
14738- KERNEL_STACK_OFFSET + THREAD_SIZE;
14739+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14740 #endif
14741+
14742+ pax_open_kernel();
14743 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14744+ pax_close_kernel();
14745+
14746 initial_code = (unsigned long)start_secondary;
14747 stack_start = c_idle.idle->thread.sp;
14748
14749@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14750
14751 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14752
14753+#ifdef CONFIG_PAX_PER_CPU_PGD
14754+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14755+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14756+ KERNEL_PGD_PTRS);
14757+#endif
14758+
14759 err = do_boot_cpu(apicid, cpu);
14760 if (err) {
14761 pr_debug("do_boot_cpu failed %d\n", err);
14762diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14763--- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14764+++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14765@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14766 struct desc_struct *desc;
14767 unsigned long base;
14768
14769- seg &= ~7UL;
14770+ seg >>= 3;
14771
14772 mutex_lock(&child->mm->context.lock);
14773- if (unlikely((seg >> 3) >= child->mm->context.size))
14774+ if (unlikely(seg >= child->mm->context.size))
14775 addr = -1L; /* bogus selector, access would fault */
14776 else {
14777 desc = child->mm->context.ldt + seg;
14778@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14779 addr += base;
14780 }
14781 mutex_unlock(&child->mm->context.lock);
14782- }
14783+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14784+ addr = ktla_ktva(addr);
14785
14786 return addr;
14787 }
14788@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14789 unsigned char opcode[15];
14790 unsigned long addr = convert_ip_to_linear(child, regs);
14791
14792+ if (addr == -EINVAL)
14793+ return 0;
14794+
14795 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14796 for (i = 0; i < copied; i++) {
14797 switch (opcode[i]) {
14798@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14799
14800 #ifdef CONFIG_X86_64
14801 case 0x40 ... 0x4f:
14802- if (regs->cs != __USER_CS)
14803+ if ((regs->cs & 0xffff) != __USER_CS)
14804 /* 32-bit mode: register increment */
14805 return 0;
14806 /* 64-bit mode: REX prefix */
14807diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14808--- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14809+++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14810@@ -1,3 +1,4 @@
14811+.section .rodata,"a",@progbits
14812 ENTRY(sys_call_table)
14813 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14814 .long sys_exit
14815diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14816--- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14817+++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14818@@ -24,17 +24,224 @@
14819
14820 #include <asm/syscalls.h>
14821
14822-/*
14823- * Do a system call from kernel instead of calling sys_execve so we
14824- * end up with proper pt_regs.
14825- */
14826-int kernel_execve(const char *filename,
14827- const char *const argv[],
14828- const char *const envp[])
14829+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14830 {
14831- long __res;
14832- asm volatile ("int $0x80"
14833- : "=a" (__res)
14834- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14835- return __res;
14836+ unsigned long pax_task_size = TASK_SIZE;
14837+
14838+#ifdef CONFIG_PAX_SEGMEXEC
14839+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14840+ pax_task_size = SEGMEXEC_TASK_SIZE;
14841+#endif
14842+
14843+ if (len > pax_task_size || addr > pax_task_size - len)
14844+ return -EINVAL;
14845+
14846+ return 0;
14847+}
14848+
14849+unsigned long
14850+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14851+ unsigned long len, unsigned long pgoff, unsigned long flags)
14852+{
14853+ struct mm_struct *mm = current->mm;
14854+ struct vm_area_struct *vma;
14855+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14856+
14857+#ifdef CONFIG_PAX_SEGMEXEC
14858+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14859+ pax_task_size = SEGMEXEC_TASK_SIZE;
14860+#endif
14861+
14862+ pax_task_size -= PAGE_SIZE;
14863+
14864+ if (len > pax_task_size)
14865+ return -ENOMEM;
14866+
14867+ if (flags & MAP_FIXED)
14868+ return addr;
14869+
14870+#ifdef CONFIG_PAX_RANDMMAP
14871+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14872+#endif
14873+
14874+ if (addr) {
14875+ addr = PAGE_ALIGN(addr);
14876+ if (pax_task_size - len >= addr) {
14877+ vma = find_vma(mm, addr);
14878+ if (check_heap_stack_gap(vma, addr, len))
14879+ return addr;
14880+ }
14881+ }
14882+ if (len > mm->cached_hole_size) {
14883+ start_addr = addr = mm->free_area_cache;
14884+ } else {
14885+ start_addr = addr = mm->mmap_base;
14886+ mm->cached_hole_size = 0;
14887+ }
14888+
14889+#ifdef CONFIG_PAX_PAGEEXEC
14890+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14891+ start_addr = 0x00110000UL;
14892+
14893+#ifdef CONFIG_PAX_RANDMMAP
14894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14895+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14896+#endif
14897+
14898+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14899+ start_addr = addr = mm->mmap_base;
14900+ else
14901+ addr = start_addr;
14902+ }
14903+#endif
14904+
14905+full_search:
14906+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14907+ /* At this point: (!vma || addr < vma->vm_end). */
14908+ if (pax_task_size - len < addr) {
14909+ /*
14910+ * Start a new search - just in case we missed
14911+ * some holes.
14912+ */
14913+ if (start_addr != mm->mmap_base) {
14914+ start_addr = addr = mm->mmap_base;
14915+ mm->cached_hole_size = 0;
14916+ goto full_search;
14917+ }
14918+ return -ENOMEM;
14919+ }
14920+ if (check_heap_stack_gap(vma, addr, len))
14921+ break;
14922+ if (addr + mm->cached_hole_size < vma->vm_start)
14923+ mm->cached_hole_size = vma->vm_start - addr;
14924+ addr = vma->vm_end;
14925+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14926+ start_addr = addr = mm->mmap_base;
14927+ mm->cached_hole_size = 0;
14928+ goto full_search;
14929+ }
14930+ }
14931+
14932+ /*
14933+ * Remember the place where we stopped the search:
14934+ */
14935+ mm->free_area_cache = addr + len;
14936+ return addr;
14937+}
14938+
14939+unsigned long
14940+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14941+ const unsigned long len, const unsigned long pgoff,
14942+ const unsigned long flags)
14943+{
14944+ struct vm_area_struct *vma;
14945+ struct mm_struct *mm = current->mm;
14946+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14947+
14948+#ifdef CONFIG_PAX_SEGMEXEC
14949+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14950+ pax_task_size = SEGMEXEC_TASK_SIZE;
14951+#endif
14952+
14953+ pax_task_size -= PAGE_SIZE;
14954+
14955+ /* requested length too big for entire address space */
14956+ if (len > pax_task_size)
14957+ return -ENOMEM;
14958+
14959+ if (flags & MAP_FIXED)
14960+ return addr;
14961+
14962+#ifdef CONFIG_PAX_PAGEEXEC
14963+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14964+ goto bottomup;
14965+#endif
14966+
14967+#ifdef CONFIG_PAX_RANDMMAP
14968+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14969+#endif
14970+
14971+ /* requesting a specific address */
14972+ if (addr) {
14973+ addr = PAGE_ALIGN(addr);
14974+ if (pax_task_size - len >= addr) {
14975+ vma = find_vma(mm, addr);
14976+ if (check_heap_stack_gap(vma, addr, len))
14977+ return addr;
14978+ }
14979+ }
14980+
14981+ /* check if free_area_cache is useful for us */
14982+ if (len <= mm->cached_hole_size) {
14983+ mm->cached_hole_size = 0;
14984+ mm->free_area_cache = mm->mmap_base;
14985+ }
14986+
14987+ /* either no address requested or can't fit in requested address hole */
14988+ addr = mm->free_area_cache;
14989+
14990+ /* make sure it can fit in the remaining address space */
14991+ if (addr > len) {
14992+ vma = find_vma(mm, addr-len);
14993+ if (check_heap_stack_gap(vma, addr - len, len))
14994+ /* remember the address as a hint for next time */
14995+ return (mm->free_area_cache = addr-len);
14996+ }
14997+
14998+ if (mm->mmap_base < len)
14999+ goto bottomup;
15000+
15001+ addr = mm->mmap_base-len;
15002+
15003+ do {
15004+ /*
15005+ * Lookup failure means no vma is above this address,
15006+ * else if new region fits below vma->vm_start,
15007+ * return with success:
15008+ */
15009+ vma = find_vma(mm, addr);
15010+ if (check_heap_stack_gap(vma, addr, len))
15011+ /* remember the address as a hint for next time */
15012+ return (mm->free_area_cache = addr);
15013+
15014+ /* remember the largest hole we saw so far */
15015+ if (addr + mm->cached_hole_size < vma->vm_start)
15016+ mm->cached_hole_size = vma->vm_start - addr;
15017+
15018+ /* try just below the current vma->vm_start */
15019+ addr = skip_heap_stack_gap(vma, len);
15020+ } while (!IS_ERR_VALUE(addr));
15021+
15022+bottomup:
15023+ /*
15024+ * A failed mmap() very likely causes application failure,
15025+ * so fall back to the bottom-up function here. This scenario
15026+ * can happen with large stack limits and large mmap()
15027+ * allocations.
15028+ */
15029+
15030+#ifdef CONFIG_PAX_SEGMEXEC
15031+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15032+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15033+ else
15034+#endif
15035+
15036+ mm->mmap_base = TASK_UNMAPPED_BASE;
15037+
15038+#ifdef CONFIG_PAX_RANDMMAP
15039+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15040+ mm->mmap_base += mm->delta_mmap;
15041+#endif
15042+
15043+ mm->free_area_cache = mm->mmap_base;
15044+ mm->cached_hole_size = ~0UL;
15045+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15046+ /*
15047+ * Restore the topdown base:
15048+ */
15049+ mm->mmap_base = base;
15050+ mm->free_area_cache = base;
15051+ mm->cached_hole_size = ~0UL;
15052+
15053+ return addr;
15054 }
15055diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15056--- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15057+++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15058@@ -32,8 +32,8 @@ out:
15059 return error;
15060 }
15061
15062-static void find_start_end(unsigned long flags, unsigned long *begin,
15063- unsigned long *end)
15064+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15065+ unsigned long *begin, unsigned long *end)
15066 {
15067 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15068 unsigned long new_begin;
15069@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15070 *begin = new_begin;
15071 }
15072 } else {
15073- *begin = TASK_UNMAPPED_BASE;
15074+ *begin = mm->mmap_base;
15075 *end = TASK_SIZE;
15076 }
15077 }
15078@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15079 if (flags & MAP_FIXED)
15080 return addr;
15081
15082- find_start_end(flags, &begin, &end);
15083+ find_start_end(mm, flags, &begin, &end);
15084
15085 if (len > end)
15086 return -ENOMEM;
15087
15088+#ifdef CONFIG_PAX_RANDMMAP
15089+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15090+#endif
15091+
15092 if (addr) {
15093 addr = PAGE_ALIGN(addr);
15094 vma = find_vma(mm, addr);
15095- if (end - len >= addr &&
15096- (!vma || addr + len <= vma->vm_start))
15097+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15098 return addr;
15099 }
15100 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15101@@ -106,7 +109,7 @@ full_search:
15102 }
15103 return -ENOMEM;
15104 }
15105- if (!vma || addr + len <= vma->vm_start) {
15106+ if (check_heap_stack_gap(vma, addr, len)) {
15107 /*
15108 * Remember the place where we stopped the search:
15109 */
15110@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15111 {
15112 struct vm_area_struct *vma;
15113 struct mm_struct *mm = current->mm;
15114- unsigned long addr = addr0;
15115+ unsigned long base = mm->mmap_base, addr = addr0;
15116
15117 /* requested length too big for entire address space */
15118 if (len > TASK_SIZE)
15119@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15120 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15121 goto bottomup;
15122
15123+#ifdef CONFIG_PAX_RANDMMAP
15124+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15125+#endif
15126+
15127 /* requesting a specific address */
15128 if (addr) {
15129 addr = PAGE_ALIGN(addr);
15130- vma = find_vma(mm, addr);
15131- if (TASK_SIZE - len >= addr &&
15132- (!vma || addr + len <= vma->vm_start))
15133- return addr;
15134+ if (TASK_SIZE - len >= addr) {
15135+ vma = find_vma(mm, addr);
15136+ if (check_heap_stack_gap(vma, addr, len))
15137+ return addr;
15138+ }
15139 }
15140
15141 /* check if free_area_cache is useful for us */
15142@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15143 /* make sure it can fit in the remaining address space */
15144 if (addr > len) {
15145 vma = find_vma(mm, addr-len);
15146- if (!vma || addr <= vma->vm_start)
15147+ if (check_heap_stack_gap(vma, addr - len, len))
15148 /* remember the address as a hint for next time */
15149 return mm->free_area_cache = addr-len;
15150 }
15151@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15152 * return with success:
15153 */
15154 vma = find_vma(mm, addr);
15155- if (!vma || addr+len <= vma->vm_start)
15156+ if (check_heap_stack_gap(vma, addr, len))
15157 /* remember the address as a hint for next time */
15158 return mm->free_area_cache = addr;
15159
15160@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15161 mm->cached_hole_size = vma->vm_start - addr;
15162
15163 /* try just below the current vma->vm_start */
15164- addr = vma->vm_start-len;
15165- } while (len < vma->vm_start);
15166+ addr = skip_heap_stack_gap(vma, len);
15167+ } while (!IS_ERR_VALUE(addr));
15168
15169 bottomup:
15170 /*
15171@@ -198,13 +206,21 @@ bottomup:
15172 * can happen with large stack limits and large mmap()
15173 * allocations.
15174 */
15175+ mm->mmap_base = TASK_UNMAPPED_BASE;
15176+
15177+#ifdef CONFIG_PAX_RANDMMAP
15178+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15179+ mm->mmap_base += mm->delta_mmap;
15180+#endif
15181+
15182+ mm->free_area_cache = mm->mmap_base;
15183 mm->cached_hole_size = ~0UL;
15184- mm->free_area_cache = TASK_UNMAPPED_BASE;
15185 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15186 /*
15187 * Restore the topdown base:
15188 */
15189- mm->free_area_cache = mm->mmap_base;
15190+ mm->mmap_base = base;
15191+ mm->free_area_cache = base;
15192 mm->cached_hole_size = ~0UL;
15193
15194 return addr;
15195diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15196--- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15197+++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15198@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15199
15200 void tboot_shutdown(u32 shutdown_type)
15201 {
15202- void (*shutdown)(void);
15203+ void (* __noreturn shutdown)(void);
15204
15205 if (!tboot_enabled())
15206 return;
15207@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15208
15209 switch_to_tboot_pt();
15210
15211- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15212+ shutdown = (void *)tboot->shutdown_entry;
15213 shutdown();
15214
15215 /* should not reach here */
15216@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15217 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15218 }
15219
15220-static atomic_t ap_wfs_count;
15221+static atomic_unchecked_t ap_wfs_count;
15222
15223 static int tboot_wait_for_aps(int num_aps)
15224 {
15225@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15226 {
15227 switch (action) {
15228 case CPU_DYING:
15229- atomic_inc(&ap_wfs_count);
15230+ atomic_inc_unchecked(&ap_wfs_count);
15231 if (num_online_cpus() == 1)
15232- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15233+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15234 return NOTIFY_BAD;
15235 break;
15236 }
15237@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15238
15239 tboot_create_trampoline();
15240
15241- atomic_set(&ap_wfs_count, 0);
15242+ atomic_set_unchecked(&ap_wfs_count, 0);
15243 register_hotcpu_notifier(&tboot_cpu_notifier);
15244 return 0;
15245 }
15246diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15247--- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15248+++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15249@@ -22,17 +22,13 @@
15250 #include <asm/hpet.h>
15251 #include <asm/time.h>
15252
15253-#ifdef CONFIG_X86_64
15254-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15255-#endif
15256-
15257 unsigned long profile_pc(struct pt_regs *regs)
15258 {
15259 unsigned long pc = instruction_pointer(regs);
15260
15261- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15262+ if (!user_mode(regs) && in_lock_functions(pc)) {
15263 #ifdef CONFIG_FRAME_POINTER
15264- return *(unsigned long *)(regs->bp + sizeof(long));
15265+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15266 #else
15267 unsigned long *sp =
15268 (unsigned long *)kernel_stack_pointer(regs);
15269@@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15270 * or above a saved flags. Eflags has bits 22-31 zero,
15271 * kernel addresses don't.
15272 */
15273+
15274+#ifdef CONFIG_PAX_KERNEXEC
15275+ return ktla_ktva(sp[0]);
15276+#else
15277 if (sp[0] >> 22)
15278 return sp[0];
15279 if (sp[1] >> 22)
15280 return sp[1];
15281 #endif
15282+
15283+#endif
15284 }
15285 return pc;
15286 }
15287diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15288--- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15289+++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15290@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15291 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15292 return -EINVAL;
15293
15294+#ifdef CONFIG_PAX_SEGMEXEC
15295+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15296+ return -EINVAL;
15297+#endif
15298+
15299 set_tls_desc(p, idx, &info, 1);
15300
15301 return 0;
15302diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15303--- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15304+++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15305@@ -32,6 +32,12 @@
15306 #include <asm/segment.h>
15307 #include <asm/page_types.h>
15308
15309+#ifdef CONFIG_PAX_KERNEXEC
15310+#define ta(X) (X)
15311+#else
15312+#define ta(X) ((X) - __PAGE_OFFSET)
15313+#endif
15314+
15315 #ifdef CONFIG_SMP
15316
15317 .section ".x86_trampoline","a"
15318@@ -62,7 +68,7 @@ r_base = .
15319 inc %ax # protected mode (PE) bit
15320 lmsw %ax # into protected mode
15321 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15322- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15323+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15324
15325 # These need to be in the same 64K segment as the above;
15326 # hence we don't use the boot_gdt_descr defined in head.S
15327diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15328--- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15329+++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15330@@ -90,7 +90,7 @@ startup_32:
15331 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15332 movl %eax, %ds
15333
15334- movl $X86_CR4_PAE, %eax
15335+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15336 movl %eax, %cr4 # Enable PAE mode
15337
15338 # Setup trampoline 4 level pagetables
15339@@ -138,7 +138,7 @@ tidt:
15340 # so the kernel can live anywhere
15341 .balign 4
15342 tgdt:
15343- .short tgdt_end - tgdt # gdt limit
15344+ .short tgdt_end - tgdt - 1 # gdt limit
15345 .long tgdt - r_base
15346 .short 0
15347 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15348diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15349--- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15350+++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15351@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15352
15353 /* Do we ignore FPU interrupts ? */
15354 char ignore_fpu_irq;
15355-
15356-/*
15357- * The IDT has to be page-aligned to simplify the Pentium
15358- * F0 0F bug workaround.
15359- */
15360-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15361 #endif
15362
15363 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15364@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15365 }
15366
15367 static void __kprobes
15368-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15369+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15370 long error_code, siginfo_t *info)
15371 {
15372 struct task_struct *tsk = current;
15373
15374 #ifdef CONFIG_X86_32
15375- if (regs->flags & X86_VM_MASK) {
15376+ if (v8086_mode(regs)) {
15377 /*
15378 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15379 * On nmi (interrupt 2), do_trap should not be called.
15380@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15381 }
15382 #endif
15383
15384- if (!user_mode(regs))
15385+ if (!user_mode_novm(regs))
15386 goto kernel_trap;
15387
15388 #ifdef CONFIG_X86_32
15389@@ -157,7 +151,7 @@ trap_signal:
15390 printk_ratelimit()) {
15391 printk(KERN_INFO
15392 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15393- tsk->comm, tsk->pid, str,
15394+ tsk->comm, task_pid_nr(tsk), str,
15395 regs->ip, regs->sp, error_code);
15396 print_vma_addr(" in ", regs->ip);
15397 printk("\n");
15398@@ -174,8 +168,20 @@ kernel_trap:
15399 if (!fixup_exception(regs)) {
15400 tsk->thread.error_code = error_code;
15401 tsk->thread.trap_no = trapnr;
15402+
15403+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15404+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15405+ str = "PAX: suspicious stack segment fault";
15406+#endif
15407+
15408 die(str, regs, error_code);
15409 }
15410+
15411+#ifdef CONFIG_PAX_REFCOUNT
15412+ if (trapnr == 4)
15413+ pax_report_refcount_overflow(regs);
15414+#endif
15415+
15416 return;
15417
15418 #ifdef CONFIG_X86_32
15419@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15420 conditional_sti(regs);
15421
15422 #ifdef CONFIG_X86_32
15423- if (regs->flags & X86_VM_MASK)
15424+ if (v8086_mode(regs))
15425 goto gp_in_vm86;
15426 #endif
15427
15428 tsk = current;
15429- if (!user_mode(regs))
15430+ if (!user_mode_novm(regs))
15431 goto gp_in_kernel;
15432
15433+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15434+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15435+ struct mm_struct *mm = tsk->mm;
15436+ unsigned long limit;
15437+
15438+ down_write(&mm->mmap_sem);
15439+ limit = mm->context.user_cs_limit;
15440+ if (limit < TASK_SIZE) {
15441+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15442+ up_write(&mm->mmap_sem);
15443+ return;
15444+ }
15445+ up_write(&mm->mmap_sem);
15446+ }
15447+#endif
15448+
15449 tsk->thread.error_code = error_code;
15450 tsk->thread.trap_no = 13;
15451
15452@@ -304,6 +326,13 @@ gp_in_kernel:
15453 if (notify_die(DIE_GPF, "general protection fault", regs,
15454 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15455 return;
15456+
15457+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15458+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15459+ die("PAX: suspicious general protection fault", regs, error_code);
15460+ else
15461+#endif
15462+
15463 die("general protection fault", regs, error_code);
15464 }
15465
15466@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15467 dotraplinkage notrace __kprobes void
15468 do_nmi(struct pt_regs *regs, long error_code)
15469 {
15470+
15471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15472+ if (!user_mode(regs)) {
15473+ unsigned long cs = regs->cs & 0xFFFF;
15474+ unsigned long ip = ktva_ktla(regs->ip);
15475+
15476+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15477+ regs->ip = ip;
15478+ }
15479+#endif
15480+
15481 nmi_enter();
15482
15483 inc_irq_stat(__nmi_count);
15484@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15485 /* It's safe to allow irq's after DR6 has been saved */
15486 preempt_conditional_sti(regs);
15487
15488- if (regs->flags & X86_VM_MASK) {
15489+ if (v8086_mode(regs)) {
15490 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15491 error_code, 1);
15492 preempt_conditional_cli(regs);
15493@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15494 * We already checked v86 mode above, so we can check for kernel mode
15495 * by just checking the CPL of CS.
15496 */
15497- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15498+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15499 tsk->thread.debugreg6 &= ~DR_STEP;
15500 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15501 regs->flags &= ~X86_EFLAGS_TF;
15502@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15503 return;
15504 conditional_sti(regs);
15505
15506- if (!user_mode_vm(regs))
15507+ if (!user_mode(regs))
15508 {
15509 if (!fixup_exception(regs)) {
15510 task->thread.error_code = error_code;
15511@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15512 void __math_state_restore(void)
15513 {
15514 struct thread_info *thread = current_thread_info();
15515- struct task_struct *tsk = thread->task;
15516+ struct task_struct *tsk = current;
15517
15518 /*
15519 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15520@@ -750,8 +790,7 @@ void __math_state_restore(void)
15521 */
15522 asmlinkage void math_state_restore(void)
15523 {
15524- struct thread_info *thread = current_thread_info();
15525- struct task_struct *tsk = thread->task;
15526+ struct task_struct *tsk = current;
15527
15528 if (!tsk_used_math(tsk)) {
15529 local_irq_enable();
15530diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15531--- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15532+++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15533@@ -20,6 +20,7 @@
15534 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15535 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15536 * arch/x86/kernel/head_32.S: processor startup
15537+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15538 *
15539 * verify_cpu, returns the status of longmode and SSE in register %eax.
15540 * 0: Success 1: Failure
15541diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15542--- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15543+++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15544@@ -41,6 +41,7 @@
15545 #include <linux/ptrace.h>
15546 #include <linux/audit.h>
15547 #include <linux/stddef.h>
15548+#include <linux/grsecurity.h>
15549
15550 #include <asm/uaccess.h>
15551 #include <asm/io.h>
15552@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15553 do_exit(SIGSEGV);
15554 }
15555
15556- tss = &per_cpu(init_tss, get_cpu());
15557+ tss = init_tss + get_cpu();
15558 current->thread.sp0 = current->thread.saved_sp0;
15559 current->thread.sysenter_cs = __KERNEL_CS;
15560 load_sp0(tss, &current->thread);
15561@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15562 struct task_struct *tsk;
15563 int tmp, ret = -EPERM;
15564
15565+#ifdef CONFIG_GRKERNSEC_VM86
15566+ if (!capable(CAP_SYS_RAWIO)) {
15567+ gr_handle_vm86();
15568+ goto out;
15569+ }
15570+#endif
15571+
15572 tsk = current;
15573 if (tsk->thread.saved_sp0)
15574 goto out;
15575@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15576 int tmp, ret;
15577 struct vm86plus_struct __user *v86;
15578
15579+#ifdef CONFIG_GRKERNSEC_VM86
15580+ if (!capable(CAP_SYS_RAWIO)) {
15581+ gr_handle_vm86();
15582+ ret = -EPERM;
15583+ goto out;
15584+ }
15585+#endif
15586+
15587 tsk = current;
15588 switch (cmd) {
15589 case VM86_REQUEST_IRQ:
15590@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15591 tsk->thread.saved_fs = info->regs32->fs;
15592 tsk->thread.saved_gs = get_user_gs(info->regs32);
15593
15594- tss = &per_cpu(init_tss, get_cpu());
15595+ tss = init_tss + get_cpu();
15596 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15597 if (cpu_has_sep)
15598 tsk->thread.sysenter_cs = 0;
15599@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15600 goto cannot_handle;
15601 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15602 goto cannot_handle;
15603- intr_ptr = (unsigned long __user *) (i << 2);
15604+ intr_ptr = (__force unsigned long __user *) (i << 2);
15605 if (get_user(segoffs, intr_ptr))
15606 goto cannot_handle;
15607 if ((segoffs >> 16) == BIOSSEG)
15608diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15609--- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15610+++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15611@@ -26,6 +26,13 @@
15612 #include <asm/page_types.h>
15613 #include <asm/cache.h>
15614 #include <asm/boot.h>
15615+#include <asm/segment.h>
15616+
15617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15618+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15619+#else
15620+#define __KERNEL_TEXT_OFFSET 0
15621+#endif
15622
15623 #undef i386 /* in case the preprocessor is a 32bit one */
15624
15625@@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15626 #ifdef CONFIG_X86_32
15627 OUTPUT_ARCH(i386)
15628 ENTRY(phys_startup_32)
15629-jiffies = jiffies_64;
15630 #else
15631 OUTPUT_ARCH(i386:x86-64)
15632 ENTRY(phys_startup_64)
15633-jiffies_64 = jiffies;
15634 #endif
15635
15636 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15637@@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15638
15639 PHDRS {
15640 text PT_LOAD FLAGS(5); /* R_E */
15641+#ifdef CONFIG_X86_32
15642+ module PT_LOAD FLAGS(5); /* R_E */
15643+#endif
15644+#ifdef CONFIG_XEN
15645+ rodata PT_LOAD FLAGS(5); /* R_E */
15646+#else
15647+ rodata PT_LOAD FLAGS(4); /* R__ */
15648+#endif
15649 data PT_LOAD FLAGS(6); /* RW_ */
15650 #ifdef CONFIG_X86_64
15651 user PT_LOAD FLAGS(5); /* R_E */
15652+#endif
15653+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15654 #ifdef CONFIG_SMP
15655 percpu PT_LOAD FLAGS(6); /* RW_ */
15656 #endif
15657+ text.init PT_LOAD FLAGS(5); /* R_E */
15658+ text.exit PT_LOAD FLAGS(5); /* R_E */
15659 init PT_LOAD FLAGS(7); /* RWE */
15660-#endif
15661 note PT_NOTE FLAGS(0); /* ___ */
15662 }
15663
15664 SECTIONS
15665 {
15666 #ifdef CONFIG_X86_32
15667- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15668- phys_startup_32 = startup_32 - LOAD_OFFSET;
15669+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15670 #else
15671- . = __START_KERNEL;
15672- phys_startup_64 = startup_64 - LOAD_OFFSET;
15673+ . = __START_KERNEL;
15674 #endif
15675
15676 /* Text and read-only data */
15677- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15678- _text = .;
15679+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15680 /* bootstrapping code */
15681+#ifdef CONFIG_X86_32
15682+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15683+#else
15684+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15685+#endif
15686+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15687+ _text = .;
15688 HEAD_TEXT
15689 #ifdef CONFIG_X86_32
15690 . = ALIGN(PAGE_SIZE);
15691@@ -109,13 +129,47 @@ SECTIONS
15692 IRQENTRY_TEXT
15693 *(.fixup)
15694 *(.gnu.warning)
15695- /* End of text section */
15696- _etext = .;
15697 } :text = 0x9090
15698
15699- NOTES :text :note
15700+ . += __KERNEL_TEXT_OFFSET;
15701+
15702+#ifdef CONFIG_X86_32
15703+ . = ALIGN(PAGE_SIZE);
15704+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15705+
15706+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15707+ MODULES_EXEC_VADDR = .;
15708+ BYTE(0)
15709+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15710+ . = ALIGN(HPAGE_SIZE);
15711+ MODULES_EXEC_END = . - 1;
15712+#endif
15713+
15714+ } :module
15715+#endif
15716+
15717+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15718+ /* End of text section */
15719+ _etext = . - __KERNEL_TEXT_OFFSET;
15720+ }
15721
15722- EXCEPTION_TABLE(16) :text = 0x9090
15723+#ifdef CONFIG_X86_32
15724+ . = ALIGN(PAGE_SIZE);
15725+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15726+ *(.idt)
15727+ . = ALIGN(PAGE_SIZE);
15728+ *(.empty_zero_page)
15729+ *(.initial_pg_fixmap)
15730+ *(.initial_pg_pmd)
15731+ *(.initial_page_table)
15732+ *(.swapper_pg_dir)
15733+ } :rodata
15734+#endif
15735+
15736+ . = ALIGN(PAGE_SIZE);
15737+ NOTES :rodata :note
15738+
15739+ EXCEPTION_TABLE(16) :rodata
15740
15741 #if defined(CONFIG_DEBUG_RODATA)
15742 /* .text should occupy whole number of pages */
15743@@ -127,16 +181,20 @@ SECTIONS
15744
15745 /* Data */
15746 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15747+
15748+#ifdef CONFIG_PAX_KERNEXEC
15749+ . = ALIGN(HPAGE_SIZE);
15750+#else
15751+ . = ALIGN(PAGE_SIZE);
15752+#endif
15753+
15754 /* Start of data section */
15755 _sdata = .;
15756
15757 /* init_task */
15758 INIT_TASK_DATA(THREAD_SIZE)
15759
15760-#ifdef CONFIG_X86_32
15761- /* 32 bit has nosave before _edata */
15762 NOSAVE_DATA
15763-#endif
15764
15765 PAGE_ALIGNED_DATA(PAGE_SIZE)
15766
15767@@ -145,6 +203,8 @@ SECTIONS
15768 DATA_DATA
15769 CONSTRUCTORS
15770
15771+ jiffies = jiffies_64;
15772+
15773 /* rarely changed data like cpu maps */
15774 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15775
15776@@ -199,12 +259,6 @@ SECTIONS
15777 }
15778 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15779
15780- . = ALIGN(L1_CACHE_BYTES);
15781- .jiffies : AT(VLOAD(.jiffies)) {
15782- *(.jiffies)
15783- }
15784- jiffies = VVIRT(.jiffies);
15785-
15786 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15787 *(.vsyscall_3)
15788 }
15789@@ -220,12 +274,19 @@ SECTIONS
15790 #endif /* CONFIG_X86_64 */
15791
15792 /* Init code and data - will be freed after init */
15793- . = ALIGN(PAGE_SIZE);
15794 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15795+ BYTE(0)
15796+
15797+#ifdef CONFIG_PAX_KERNEXEC
15798+ . = ALIGN(HPAGE_SIZE);
15799+#else
15800+ . = ALIGN(PAGE_SIZE);
15801+#endif
15802+
15803 __init_begin = .; /* paired with __init_end */
15804- }
15805+ } :init.begin
15806
15807-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15808+#ifdef CONFIG_SMP
15809 /*
15810 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15811 * output PHDR, so the next output section - .init.text - should
15812@@ -234,12 +295,27 @@ SECTIONS
15813 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15814 #endif
15815
15816- INIT_TEXT_SECTION(PAGE_SIZE)
15817-#ifdef CONFIG_X86_64
15818- :init
15819-#endif
15820+ . = ALIGN(PAGE_SIZE);
15821+ init_begin = .;
15822+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15823+ VMLINUX_SYMBOL(_sinittext) = .;
15824+ INIT_TEXT
15825+ VMLINUX_SYMBOL(_einittext) = .;
15826+ . = ALIGN(PAGE_SIZE);
15827+ } :text.init
15828
15829- INIT_DATA_SECTION(16)
15830+ /*
15831+ * .exit.text is discard at runtime, not link time, to deal with
15832+ * references from .altinstructions and .eh_frame
15833+ */
15834+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15835+ EXIT_TEXT
15836+ . = ALIGN(16);
15837+ } :text.exit
15838+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15839+
15840+ . = ALIGN(PAGE_SIZE);
15841+ INIT_DATA_SECTION(16) :init
15842
15843 /*
15844 * Code and data for a variety of lowlevel trampolines, to be
15845@@ -306,19 +382,12 @@ SECTIONS
15846 }
15847
15848 . = ALIGN(8);
15849- /*
15850- * .exit.text is discard at runtime, not link time, to deal with
15851- * references from .altinstructions and .eh_frame
15852- */
15853- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15854- EXIT_TEXT
15855- }
15856
15857 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15858 EXIT_DATA
15859 }
15860
15861-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15862+#ifndef CONFIG_SMP
15863 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15864 #endif
15865
15866@@ -337,16 +406,10 @@ SECTIONS
15867 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15868 __smp_locks = .;
15869 *(.smp_locks)
15870- . = ALIGN(PAGE_SIZE);
15871 __smp_locks_end = .;
15872+ . = ALIGN(PAGE_SIZE);
15873 }
15874
15875-#ifdef CONFIG_X86_64
15876- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15877- NOSAVE_DATA
15878- }
15879-#endif
15880-
15881 /* BSS */
15882 . = ALIGN(PAGE_SIZE);
15883 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15884@@ -362,6 +425,7 @@ SECTIONS
15885 __brk_base = .;
15886 . += 64 * 1024; /* 64k alignment slop space */
15887 *(.brk_reservation) /* areas brk users have reserved */
15888+ . = ALIGN(HPAGE_SIZE);
15889 __brk_limit = .;
15890 }
15891
15892@@ -388,13 +452,12 @@ SECTIONS
15893 * for the boot processor.
15894 */
15895 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15896-INIT_PER_CPU(gdt_page);
15897 INIT_PER_CPU(irq_stack_union);
15898
15899 /*
15900 * Build-time check on the image size:
15901 */
15902-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15903+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15904 "kernel image bigger than KERNEL_IMAGE_SIZE");
15905
15906 #ifdef CONFIG_SMP
15907diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15908--- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15909+++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15910@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15911
15912 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15913 /* copy vsyscall data */
15914+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15915 vsyscall_gtod_data.clock.vread = clock->vread;
15916 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15917 vsyscall_gtod_data.clock.mask = clock->mask;
15918@@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15919 We do this here because otherwise user space would do it on
15920 its own in a likely inferior way (no access to jiffies).
15921 If you don't like it pass NULL. */
15922- if (tcache && tcache->blob[0] == (j = __jiffies)) {
15923+ if (tcache && tcache->blob[0] == (j = jiffies)) {
15924 p = tcache->blob[1];
15925 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15926 /* Load per CPU data from RDTSCP */
15927diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15928--- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15929+++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15930@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15931 EXPORT_SYMBOL(copy_user_generic_string);
15932 EXPORT_SYMBOL(copy_user_generic_unrolled);
15933 EXPORT_SYMBOL(__copy_user_nocache);
15934-EXPORT_SYMBOL(_copy_from_user);
15935-EXPORT_SYMBOL(_copy_to_user);
15936
15937 EXPORT_SYMBOL(copy_page);
15938 EXPORT_SYMBOL(clear_page);
15939diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15940--- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15941+++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15942@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15943 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15944 return -EINVAL;
15945
15946- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15947+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15948 fx_sw_user->extended_size -
15949 FP_XSTATE_MAGIC2_SIZE));
15950 if (err)
15951@@ -267,7 +267,7 @@ fx_only:
15952 * the other extended state.
15953 */
15954 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15955- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15956+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15957 }
15958
15959 /*
15960@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15961 if (use_xsave())
15962 err = restore_user_xstate(buf);
15963 else
15964- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15965+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15966 buf);
15967 if (unlikely(err)) {
15968 /*
15969diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
15970--- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
15971+++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
15972@@ -89,7 +89,7 @@
15973 #define Src2ImmByte (2<<29)
15974 #define Src2One (3<<29)
15975 #define Src2Imm (4<<29)
15976-#define Src2Mask (7<<29)
15977+#define Src2Mask (7U<<29)
15978
15979 #define X2(x...) x, x
15980 #define X3(x...) X2(x), x
15981@@ -190,6 +190,7 @@ struct group_dual {
15982
15983 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15984 do { \
15985+ unsigned long _tmp; \
15986 __asm__ __volatile__ ( \
15987 _PRE_EFLAGS("0", "4", "2") \
15988 _op _suffix " %"_x"3,%1; " \
15989@@ -203,8 +204,6 @@ struct group_dual {
15990 /* Raw emulation: instruction has two explicit operands. */
15991 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15992 do { \
15993- unsigned long _tmp; \
15994- \
15995 switch ((_dst).bytes) { \
15996 case 2: \
15997 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15998@@ -220,7 +219,6 @@ struct group_dual {
15999
16000 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16001 do { \
16002- unsigned long _tmp; \
16003 switch ((_dst).bytes) { \
16004 case 1: \
16005 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16006diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16007--- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16008+++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16009@@ -53,7 +53,7 @@
16010 #define APIC_BUS_CYCLE_NS 1
16011
16012 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16013-#define apic_debug(fmt, arg...)
16014+#define apic_debug(fmt, arg...) do {} while (0)
16015
16016 #define APIC_LVT_NUM 6
16017 /* 14 is the version for Xeon and Pentium 8.4.8*/
16018diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16019--- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16020+++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16021@@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16022
16023 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16024
16025- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16026+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16027
16028 /*
16029 * Assume that the pte write on a page table of the same type
16030@@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16031 smp_rmb();
16032
16033 spin_lock(&vcpu->kvm->mmu_lock);
16034- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16035+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16036 gentry = 0;
16037 kvm_mmu_free_some_pages(vcpu);
16038 ++vcpu->kvm->stat.mmu_pte_write;
16039diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16040--- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16041+++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16042@@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16043 unsigned long mmu_seq;
16044 bool map_writable;
16045
16046+ pax_track_stack();
16047+
16048 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16049
16050 r = mmu_topup_memory_caches(vcpu);
16051@@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16052 if (need_flush)
16053 kvm_flush_remote_tlbs(vcpu->kvm);
16054
16055- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16056+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16057
16058 spin_unlock(&vcpu->kvm->mmu_lock);
16059
16060diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16061--- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16062+++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16063@@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16064 int cpu = raw_smp_processor_id();
16065
16066 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16067+
16068+ pax_open_kernel();
16069 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16070+ pax_close_kernel();
16071+
16072 load_TR_desc();
16073 }
16074
16075@@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16076 #endif
16077 #endif
16078
16079+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16080+ __set_fs(current_thread_info()->addr_limit);
16081+#endif
16082+
16083 reload_tss(vcpu);
16084
16085 local_irq_disable();
16086diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16087--- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16088+++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16089@@ -725,7 +725,11 @@ static void reload_tss(void)
16090 struct desc_struct *descs;
16091
16092 descs = (void *)gdt->address;
16093+
16094+ pax_open_kernel();
16095 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16096+ pax_close_kernel();
16097+
16098 load_TR_desc();
16099 }
16100
16101@@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16102 if (!cpu_has_vmx_flexpriority())
16103 flexpriority_enabled = 0;
16104
16105- if (!cpu_has_vmx_tpr_shadow())
16106- kvm_x86_ops->update_cr8_intercept = NULL;
16107+ if (!cpu_has_vmx_tpr_shadow()) {
16108+ pax_open_kernel();
16109+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16110+ pax_close_kernel();
16111+ }
16112
16113 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16114 kvm_disable_largepages();
16115@@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16116 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16117
16118 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16119- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16120+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16121 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16122 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16123 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16124@@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16125 "jmp .Lkvm_vmx_return \n\t"
16126 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16127 ".Lkvm_vmx_return: "
16128+
16129+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16130+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16131+ ".Lkvm_vmx_return2: "
16132+#endif
16133+
16134 /* Save guest registers, load host registers, keep flags */
16135 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16136 "pop %0 \n\t"
16137@@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16138 #endif
16139 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16140 [wordsize]"i"(sizeof(ulong))
16141+
16142+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16143+ ,[cs]"i"(__KERNEL_CS)
16144+#endif
16145+
16146 : "cc", "memory"
16147 , R"ax", R"bx", R"di", R"si"
16148 #ifdef CONFIG_X86_64
16149@@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16150
16151 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16152
16153- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16154+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16155+
16156+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16157+ loadsegment(fs, __KERNEL_PERCPU);
16158+#endif
16159+
16160+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16161+ __set_fs(current_thread_info()->addr_limit);
16162+#endif
16163+
16164 vmx->launched = 1;
16165
16166 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16167diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16168--- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16169+++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16170@@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16171 if (n < msr_list.nmsrs)
16172 goto out;
16173 r = -EFAULT;
16174+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16175+ goto out;
16176 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16177 num_msrs_to_save * sizeof(u32)))
16178 goto out;
16179@@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16180 struct kvm_cpuid2 *cpuid,
16181 struct kvm_cpuid_entry2 __user *entries)
16182 {
16183- int r;
16184+ int r, i;
16185
16186 r = -E2BIG;
16187 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16188 goto out;
16189 r = -EFAULT;
16190- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16191- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16192+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16193 goto out;
16194+ for (i = 0; i < cpuid->nent; ++i) {
16195+ struct kvm_cpuid_entry2 cpuid_entry;
16196+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16197+ goto out;
16198+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16199+ }
16200 vcpu->arch.cpuid_nent = cpuid->nent;
16201 kvm_apic_set_version(vcpu);
16202 kvm_x86_ops->cpuid_update(vcpu);
16203@@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16204 struct kvm_cpuid2 *cpuid,
16205 struct kvm_cpuid_entry2 __user *entries)
16206 {
16207- int r;
16208+ int r, i;
16209
16210 r = -E2BIG;
16211 if (cpuid->nent < vcpu->arch.cpuid_nent)
16212 goto out;
16213 r = -EFAULT;
16214- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16215- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16216+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16217 goto out;
16218+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16219+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16220+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16221+ goto out;
16222+ }
16223 return 0;
16224
16225 out:
16226@@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16227 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16228 struct kvm_interrupt *irq)
16229 {
16230- if (irq->irq < 0 || irq->irq >= 256)
16231+ if (irq->irq >= 256)
16232 return -EINVAL;
16233 if (irqchip_in_kernel(vcpu->kvm))
16234 return -ENXIO;
16235@@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16236 }
16237 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16238
16239-int kvm_arch_init(void *opaque)
16240+int kvm_arch_init(const void *opaque)
16241 {
16242 int r;
16243 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16244diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16245--- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16246+++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16247@@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16248 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16249 * Launcher to reboot us.
16250 */
16251-static void lguest_restart(char *reason)
16252+static __noreturn void lguest_restart(char *reason)
16253 {
16254 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16255+ BUG();
16256 }
16257
16258 /*G:050
16259diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16260--- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16261+++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16262@@ -8,18 +8,30 @@
16263
16264 long long atomic64_read_cx8(long long, const atomic64_t *v);
16265 EXPORT_SYMBOL(atomic64_read_cx8);
16266+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16267+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16268 long long atomic64_set_cx8(long long, const atomic64_t *v);
16269 EXPORT_SYMBOL(atomic64_set_cx8);
16270+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16271+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16272 long long atomic64_xchg_cx8(long long, unsigned high);
16273 EXPORT_SYMBOL(atomic64_xchg_cx8);
16274 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_add_return_cx8);
16276+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16277+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16278 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16279 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16280+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16281+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16282 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16283 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16284+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16285+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16286 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16287 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16288+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16289+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16290 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16291 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16292 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16293@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16294 #ifndef CONFIG_X86_CMPXCHG64
16295 long long atomic64_read_386(long long, const atomic64_t *v);
16296 EXPORT_SYMBOL(atomic64_read_386);
16297+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16298+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16299 long long atomic64_set_386(long long, const atomic64_t *v);
16300 EXPORT_SYMBOL(atomic64_set_386);
16301+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16302+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16303 long long atomic64_xchg_386(long long, unsigned high);
16304 EXPORT_SYMBOL(atomic64_xchg_386);
16305 long long atomic64_add_return_386(long long a, atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_add_return_386);
16307+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16308+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16309 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16310 EXPORT_SYMBOL(atomic64_sub_return_386);
16311+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16312+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16313 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16314 EXPORT_SYMBOL(atomic64_inc_return_386);
16315+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16316+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16317 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16318 EXPORT_SYMBOL(atomic64_dec_return_386);
16319+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16320+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16321 long long atomic64_add_386(long long a, atomic64_t *v);
16322 EXPORT_SYMBOL(atomic64_add_386);
16323+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16324+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16325 long long atomic64_sub_386(long long a, atomic64_t *v);
16326 EXPORT_SYMBOL(atomic64_sub_386);
16327+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16328+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16329 long long atomic64_inc_386(long long a, atomic64_t *v);
16330 EXPORT_SYMBOL(atomic64_inc_386);
16331+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16332+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16333 long long atomic64_dec_386(long long a, atomic64_t *v);
16334 EXPORT_SYMBOL(atomic64_dec_386);
16335+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16336+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16337 long long atomic64_dec_if_positive_386(atomic64_t *v);
16338 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16339 int atomic64_inc_not_zero_386(atomic64_t *v);
16340diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16341--- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16342+++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16343@@ -48,6 +48,10 @@ BEGIN(read)
16344 movl (v), %eax
16345 movl 4(v), %edx
16346 RET_ENDP
16347+BEGIN(read_unchecked)
16348+ movl (v), %eax
16349+ movl 4(v), %edx
16350+RET_ENDP
16351 #undef v
16352
16353 #define v %esi
16354@@ -55,6 +59,10 @@ BEGIN(set)
16355 movl %ebx, (v)
16356 movl %ecx, 4(v)
16357 RET_ENDP
16358+BEGIN(set_unchecked)
16359+ movl %ebx, (v)
16360+ movl %ecx, 4(v)
16361+RET_ENDP
16362 #undef v
16363
16364 #define v %esi
16365@@ -70,6 +78,20 @@ RET_ENDP
16366 BEGIN(add)
16367 addl %eax, (v)
16368 adcl %edx, 4(v)
16369+
16370+#ifdef CONFIG_PAX_REFCOUNT
16371+ jno 0f
16372+ subl %eax, (v)
16373+ sbbl %edx, 4(v)
16374+ int $4
16375+0:
16376+ _ASM_EXTABLE(0b, 0b)
16377+#endif
16378+
16379+RET_ENDP
16380+BEGIN(add_unchecked)
16381+ addl %eax, (v)
16382+ adcl %edx, 4(v)
16383 RET_ENDP
16384 #undef v
16385
16386@@ -77,6 +99,24 @@ RET_ENDP
16387 BEGIN(add_return)
16388 addl (v), %eax
16389 adcl 4(v), %edx
16390+
16391+#ifdef CONFIG_PAX_REFCOUNT
16392+ into
16393+1234:
16394+ _ASM_EXTABLE(1234b, 2f)
16395+#endif
16396+
16397+ movl %eax, (v)
16398+ movl %edx, 4(v)
16399+
16400+#ifdef CONFIG_PAX_REFCOUNT
16401+2:
16402+#endif
16403+
16404+RET_ENDP
16405+BEGIN(add_return_unchecked)
16406+ addl (v), %eax
16407+ adcl 4(v), %edx
16408 movl %eax, (v)
16409 movl %edx, 4(v)
16410 RET_ENDP
16411@@ -86,6 +126,20 @@ RET_ENDP
16412 BEGIN(sub)
16413 subl %eax, (v)
16414 sbbl %edx, 4(v)
16415+
16416+#ifdef CONFIG_PAX_REFCOUNT
16417+ jno 0f
16418+ addl %eax, (v)
16419+ adcl %edx, 4(v)
16420+ int $4
16421+0:
16422+ _ASM_EXTABLE(0b, 0b)
16423+#endif
16424+
16425+RET_ENDP
16426+BEGIN(sub_unchecked)
16427+ subl %eax, (v)
16428+ sbbl %edx, 4(v)
16429 RET_ENDP
16430 #undef v
16431
16432@@ -96,6 +150,27 @@ BEGIN(sub_return)
16433 sbbl $0, %edx
16434 addl (v), %eax
16435 adcl 4(v), %edx
16436+
16437+#ifdef CONFIG_PAX_REFCOUNT
16438+ into
16439+1234:
16440+ _ASM_EXTABLE(1234b, 2f)
16441+#endif
16442+
16443+ movl %eax, (v)
16444+ movl %edx, 4(v)
16445+
16446+#ifdef CONFIG_PAX_REFCOUNT
16447+2:
16448+#endif
16449+
16450+RET_ENDP
16451+BEGIN(sub_return_unchecked)
16452+ negl %edx
16453+ negl %eax
16454+ sbbl $0, %edx
16455+ addl (v), %eax
16456+ adcl 4(v), %edx
16457 movl %eax, (v)
16458 movl %edx, 4(v)
16459 RET_ENDP
16460@@ -105,6 +180,20 @@ RET_ENDP
16461 BEGIN(inc)
16462 addl $1, (v)
16463 adcl $0, 4(v)
16464+
16465+#ifdef CONFIG_PAX_REFCOUNT
16466+ jno 0f
16467+ subl $1, (v)
16468+ sbbl $0, 4(v)
16469+ int $4
16470+0:
16471+ _ASM_EXTABLE(0b, 0b)
16472+#endif
16473+
16474+RET_ENDP
16475+BEGIN(inc_unchecked)
16476+ addl $1, (v)
16477+ adcl $0, 4(v)
16478 RET_ENDP
16479 #undef v
16480
16481@@ -114,6 +203,26 @@ BEGIN(inc_return)
16482 movl 4(v), %edx
16483 addl $1, %eax
16484 adcl $0, %edx
16485+
16486+#ifdef CONFIG_PAX_REFCOUNT
16487+ into
16488+1234:
16489+ _ASM_EXTABLE(1234b, 2f)
16490+#endif
16491+
16492+ movl %eax, (v)
16493+ movl %edx, 4(v)
16494+
16495+#ifdef CONFIG_PAX_REFCOUNT
16496+2:
16497+#endif
16498+
16499+RET_ENDP
16500+BEGIN(inc_return_unchecked)
16501+ movl (v), %eax
16502+ movl 4(v), %edx
16503+ addl $1, %eax
16504+ adcl $0, %edx
16505 movl %eax, (v)
16506 movl %edx, 4(v)
16507 RET_ENDP
16508@@ -123,6 +232,20 @@ RET_ENDP
16509 BEGIN(dec)
16510 subl $1, (v)
16511 sbbl $0, 4(v)
16512+
16513+#ifdef CONFIG_PAX_REFCOUNT
16514+ jno 0f
16515+ addl $1, (v)
16516+ adcl $0, 4(v)
16517+ int $4
16518+0:
16519+ _ASM_EXTABLE(0b, 0b)
16520+#endif
16521+
16522+RET_ENDP
16523+BEGIN(dec_unchecked)
16524+ subl $1, (v)
16525+ sbbl $0, 4(v)
16526 RET_ENDP
16527 #undef v
16528
16529@@ -132,6 +255,26 @@ BEGIN(dec_return)
16530 movl 4(v), %edx
16531 subl $1, %eax
16532 sbbl $0, %edx
16533+
16534+#ifdef CONFIG_PAX_REFCOUNT
16535+ into
16536+1234:
16537+ _ASM_EXTABLE(1234b, 2f)
16538+#endif
16539+
16540+ movl %eax, (v)
16541+ movl %edx, 4(v)
16542+
16543+#ifdef CONFIG_PAX_REFCOUNT
16544+2:
16545+#endif
16546+
16547+RET_ENDP
16548+BEGIN(dec_return_unchecked)
16549+ movl (v), %eax
16550+ movl 4(v), %edx
16551+ subl $1, %eax
16552+ sbbl $0, %edx
16553 movl %eax, (v)
16554 movl %edx, 4(v)
16555 RET_ENDP
16556@@ -143,6 +286,13 @@ BEGIN(add_unless)
16557 adcl %edx, %edi
16558 addl (v), %eax
16559 adcl 4(v), %edx
16560+
16561+#ifdef CONFIG_PAX_REFCOUNT
16562+ into
16563+1234:
16564+ _ASM_EXTABLE(1234b, 2f)
16565+#endif
16566+
16567 cmpl %eax, %esi
16568 je 3f
16569 1:
16570@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16571 1:
16572 addl $1, %eax
16573 adcl $0, %edx
16574+
16575+#ifdef CONFIG_PAX_REFCOUNT
16576+ into
16577+1234:
16578+ _ASM_EXTABLE(1234b, 2f)
16579+#endif
16580+
16581 movl %eax, (v)
16582 movl %edx, 4(v)
16583 movl $1, %eax
16584@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16585 movl 4(v), %edx
16586 subl $1, %eax
16587 sbbl $0, %edx
16588+
16589+#ifdef CONFIG_PAX_REFCOUNT
16590+ into
16591+1234:
16592+ _ASM_EXTABLE(1234b, 1f)
16593+#endif
16594+
16595 js 1f
16596 movl %eax, (v)
16597 movl %edx, 4(v)
16598diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16599--- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16600+++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16601@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16602 CFI_ENDPROC
16603 ENDPROC(atomic64_read_cx8)
16604
16605+ENTRY(atomic64_read_unchecked_cx8)
16606+ CFI_STARTPROC
16607+
16608+ read64 %ecx
16609+ ret
16610+ CFI_ENDPROC
16611+ENDPROC(atomic64_read_unchecked_cx8)
16612+
16613 ENTRY(atomic64_set_cx8)
16614 CFI_STARTPROC
16615
16616@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16617 CFI_ENDPROC
16618 ENDPROC(atomic64_set_cx8)
16619
16620+ENTRY(atomic64_set_unchecked_cx8)
16621+ CFI_STARTPROC
16622+
16623+1:
16624+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16625+ * are atomic on 586 and newer */
16626+ cmpxchg8b (%esi)
16627+ jne 1b
16628+
16629+ ret
16630+ CFI_ENDPROC
16631+ENDPROC(atomic64_set_unchecked_cx8)
16632+
16633 ENTRY(atomic64_xchg_cx8)
16634 CFI_STARTPROC
16635
16636@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16637 CFI_ENDPROC
16638 ENDPROC(atomic64_xchg_cx8)
16639
16640-.macro addsub_return func ins insc
16641-ENTRY(atomic64_\func\()_return_cx8)
16642+.macro addsub_return func ins insc unchecked=""
16643+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16644 CFI_STARTPROC
16645 SAVE ebp
16646 SAVE ebx
16647@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16648 movl %edx, %ecx
16649 \ins\()l %esi, %ebx
16650 \insc\()l %edi, %ecx
16651+
16652+.ifb \unchecked
16653+#ifdef CONFIG_PAX_REFCOUNT
16654+ into
16655+2:
16656+ _ASM_EXTABLE(2b, 3f)
16657+#endif
16658+.endif
16659+
16660 LOCK_PREFIX
16661 cmpxchg8b (%ebp)
16662 jne 1b
16663-
16664-10:
16665 movl %ebx, %eax
16666 movl %ecx, %edx
16667+
16668+.ifb \unchecked
16669+#ifdef CONFIG_PAX_REFCOUNT
16670+3:
16671+#endif
16672+.endif
16673+
16674 RESTORE edi
16675 RESTORE esi
16676 RESTORE ebx
16677 RESTORE ebp
16678 ret
16679 CFI_ENDPROC
16680-ENDPROC(atomic64_\func\()_return_cx8)
16681+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16682 .endm
16683
16684 addsub_return add add adc
16685 addsub_return sub sub sbb
16686+addsub_return add add adc _unchecked
16687+addsub_return sub sub sbb _unchecked
16688
16689-.macro incdec_return func ins insc
16690-ENTRY(atomic64_\func\()_return_cx8)
16691+.macro incdec_return func ins insc unchecked
16692+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16693 CFI_STARTPROC
16694 SAVE ebx
16695
16696@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16697 movl %edx, %ecx
16698 \ins\()l $1, %ebx
16699 \insc\()l $0, %ecx
16700+
16701+.ifb \unchecked
16702+#ifdef CONFIG_PAX_REFCOUNT
16703+ into
16704+2:
16705+ _ASM_EXTABLE(2b, 3f)
16706+#endif
16707+.endif
16708+
16709 LOCK_PREFIX
16710 cmpxchg8b (%esi)
16711 jne 1b
16712
16713-10:
16714 movl %ebx, %eax
16715 movl %ecx, %edx
16716+
16717+.ifb \unchecked
16718+#ifdef CONFIG_PAX_REFCOUNT
16719+3:
16720+#endif
16721+.endif
16722+
16723 RESTORE ebx
16724 ret
16725 CFI_ENDPROC
16726-ENDPROC(atomic64_\func\()_return_cx8)
16727+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16728 .endm
16729
16730 incdec_return inc add adc
16731 incdec_return dec sub sbb
16732+incdec_return inc add adc _unchecked
16733+incdec_return dec sub sbb _unchecked
16734
16735 ENTRY(atomic64_dec_if_positive_cx8)
16736 CFI_STARTPROC
16737@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16738 movl %edx, %ecx
16739 subl $1, %ebx
16740 sbb $0, %ecx
16741+
16742+#ifdef CONFIG_PAX_REFCOUNT
16743+ into
16744+1234:
16745+ _ASM_EXTABLE(1234b, 2f)
16746+#endif
16747+
16748 js 2f
16749 LOCK_PREFIX
16750 cmpxchg8b (%esi)
16751@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16752 movl %edx, %ecx
16753 addl %esi, %ebx
16754 adcl %edi, %ecx
16755+
16756+#ifdef CONFIG_PAX_REFCOUNT
16757+ into
16758+1234:
16759+ _ASM_EXTABLE(1234b, 3f)
16760+#endif
16761+
16762 LOCK_PREFIX
16763 cmpxchg8b (%ebp)
16764 jne 1b
16765@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16766 movl %edx, %ecx
16767 addl $1, %ebx
16768 adcl $0, %ecx
16769+
16770+#ifdef CONFIG_PAX_REFCOUNT
16771+ into
16772+1234:
16773+ _ASM_EXTABLE(1234b, 3f)
16774+#endif
16775+
16776 LOCK_PREFIX
16777 cmpxchg8b (%esi)
16778 jne 1b
16779diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16780--- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16781+++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16782@@ -28,7 +28,8 @@
16783 #include <linux/linkage.h>
16784 #include <asm/dwarf2.h>
16785 #include <asm/errno.h>
16786-
16787+#include <asm/segment.h>
16788+
16789 /*
16790 * computes a partial checksum, e.g. for TCP/UDP fragments
16791 */
16792@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16793
16794 #define ARGBASE 16
16795 #define FP 12
16796-
16797-ENTRY(csum_partial_copy_generic)
16798+
16799+ENTRY(csum_partial_copy_generic_to_user)
16800 CFI_STARTPROC
16801+
16802+#ifdef CONFIG_PAX_MEMORY_UDEREF
16803+ pushl_cfi %gs
16804+ popl_cfi %es
16805+ jmp csum_partial_copy_generic
16806+#endif
16807+
16808+ENTRY(csum_partial_copy_generic_from_user)
16809+
16810+#ifdef CONFIG_PAX_MEMORY_UDEREF
16811+ pushl_cfi %gs
16812+ popl_cfi %ds
16813+#endif
16814+
16815+ENTRY(csum_partial_copy_generic)
16816 subl $4,%esp
16817 CFI_ADJUST_CFA_OFFSET 4
16818 pushl_cfi %edi
16819@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16820 jmp 4f
16821 SRC(1: movw (%esi), %bx )
16822 addl $2, %esi
16823-DST( movw %bx, (%edi) )
16824+DST( movw %bx, %es:(%edi) )
16825 addl $2, %edi
16826 addw %bx, %ax
16827 adcl $0, %eax
16828@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16829 SRC(1: movl (%esi), %ebx )
16830 SRC( movl 4(%esi), %edx )
16831 adcl %ebx, %eax
16832-DST( movl %ebx, (%edi) )
16833+DST( movl %ebx, %es:(%edi) )
16834 adcl %edx, %eax
16835-DST( movl %edx, 4(%edi) )
16836+DST( movl %edx, %es:4(%edi) )
16837
16838 SRC( movl 8(%esi), %ebx )
16839 SRC( movl 12(%esi), %edx )
16840 adcl %ebx, %eax
16841-DST( movl %ebx, 8(%edi) )
16842+DST( movl %ebx, %es:8(%edi) )
16843 adcl %edx, %eax
16844-DST( movl %edx, 12(%edi) )
16845+DST( movl %edx, %es:12(%edi) )
16846
16847 SRC( movl 16(%esi), %ebx )
16848 SRC( movl 20(%esi), %edx )
16849 adcl %ebx, %eax
16850-DST( movl %ebx, 16(%edi) )
16851+DST( movl %ebx, %es:16(%edi) )
16852 adcl %edx, %eax
16853-DST( movl %edx, 20(%edi) )
16854+DST( movl %edx, %es:20(%edi) )
16855
16856 SRC( movl 24(%esi), %ebx )
16857 SRC( movl 28(%esi), %edx )
16858 adcl %ebx, %eax
16859-DST( movl %ebx, 24(%edi) )
16860+DST( movl %ebx, %es:24(%edi) )
16861 adcl %edx, %eax
16862-DST( movl %edx, 28(%edi) )
16863+DST( movl %edx, %es:28(%edi) )
16864
16865 lea 32(%esi), %esi
16866 lea 32(%edi), %edi
16867@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16868 shrl $2, %edx # This clears CF
16869 SRC(3: movl (%esi), %ebx )
16870 adcl %ebx, %eax
16871-DST( movl %ebx, (%edi) )
16872+DST( movl %ebx, %es:(%edi) )
16873 lea 4(%esi), %esi
16874 lea 4(%edi), %edi
16875 dec %edx
16876@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16877 jb 5f
16878 SRC( movw (%esi), %cx )
16879 leal 2(%esi), %esi
16880-DST( movw %cx, (%edi) )
16881+DST( movw %cx, %es:(%edi) )
16882 leal 2(%edi), %edi
16883 je 6f
16884 shll $16,%ecx
16885 SRC(5: movb (%esi), %cl )
16886-DST( movb %cl, (%edi) )
16887+DST( movb %cl, %es:(%edi) )
16888 6: addl %ecx, %eax
16889 adcl $0, %eax
16890 7:
16891@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16892
16893 6001:
16894 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16895- movl $-EFAULT, (%ebx)
16896+ movl $-EFAULT, %ss:(%ebx)
16897
16898 # zero the complete destination - computing the rest
16899 # is too much work
16900@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16901
16902 6002:
16903 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16904- movl $-EFAULT,(%ebx)
16905+ movl $-EFAULT,%ss:(%ebx)
16906 jmp 5000b
16907
16908 .previous
16909
16910+ pushl_cfi %ss
16911+ popl_cfi %ds
16912+ pushl_cfi %ss
16913+ popl_cfi %es
16914 popl_cfi %ebx
16915 CFI_RESTORE ebx
16916 popl_cfi %esi
16917@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16918 popl_cfi %ecx # equivalent to addl $4,%esp
16919 ret
16920 CFI_ENDPROC
16921-ENDPROC(csum_partial_copy_generic)
16922+ENDPROC(csum_partial_copy_generic_to_user)
16923
16924 #else
16925
16926 /* Version for PentiumII/PPro */
16927
16928 #define ROUND1(x) \
16929+ nop; nop; nop; \
16930 SRC(movl x(%esi), %ebx ) ; \
16931 addl %ebx, %eax ; \
16932- DST(movl %ebx, x(%edi) ) ;
16933+ DST(movl %ebx, %es:x(%edi)) ;
16934
16935 #define ROUND(x) \
16936+ nop; nop; nop; \
16937 SRC(movl x(%esi), %ebx ) ; \
16938 adcl %ebx, %eax ; \
16939- DST(movl %ebx, x(%edi) ) ;
16940+ DST(movl %ebx, %es:x(%edi)) ;
16941
16942 #define ARGBASE 12
16943-
16944-ENTRY(csum_partial_copy_generic)
16945+
16946+ENTRY(csum_partial_copy_generic_to_user)
16947 CFI_STARTPROC
16948+
16949+#ifdef CONFIG_PAX_MEMORY_UDEREF
16950+ pushl_cfi %gs
16951+ popl_cfi %es
16952+ jmp csum_partial_copy_generic
16953+#endif
16954+
16955+ENTRY(csum_partial_copy_generic_from_user)
16956+
16957+#ifdef CONFIG_PAX_MEMORY_UDEREF
16958+ pushl_cfi %gs
16959+ popl_cfi %ds
16960+#endif
16961+
16962+ENTRY(csum_partial_copy_generic)
16963 pushl_cfi %ebx
16964 CFI_REL_OFFSET ebx, 0
16965 pushl_cfi %edi
16966@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16967 subl %ebx, %edi
16968 lea -1(%esi),%edx
16969 andl $-32,%edx
16970- lea 3f(%ebx,%ebx), %ebx
16971+ lea 3f(%ebx,%ebx,2), %ebx
16972 testl %esi, %esi
16973 jmp *%ebx
16974 1: addl $64,%esi
16975@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16976 jb 5f
16977 SRC( movw (%esi), %dx )
16978 leal 2(%esi), %esi
16979-DST( movw %dx, (%edi) )
16980+DST( movw %dx, %es:(%edi) )
16981 leal 2(%edi), %edi
16982 je 6f
16983 shll $16,%edx
16984 5:
16985 SRC( movb (%esi), %dl )
16986-DST( movb %dl, (%edi) )
16987+DST( movb %dl, %es:(%edi) )
16988 6: addl %edx, %eax
16989 adcl $0, %eax
16990 7:
16991 .section .fixup, "ax"
16992 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16993- movl $-EFAULT, (%ebx)
16994+ movl $-EFAULT, %ss:(%ebx)
16995 # zero the complete destination (computing the rest is too much work)
16996 movl ARGBASE+8(%esp),%edi # dst
16997 movl ARGBASE+12(%esp),%ecx # len
16998@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16999 rep; stosb
17000 jmp 7b
17001 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17002- movl $-EFAULT, (%ebx)
17003+ movl $-EFAULT, %ss:(%ebx)
17004 jmp 7b
17005 .previous
17006
17007+#ifdef CONFIG_PAX_MEMORY_UDEREF
17008+ pushl_cfi %ss
17009+ popl_cfi %ds
17010+ pushl_cfi %ss
17011+ popl_cfi %es
17012+#endif
17013+
17014 popl_cfi %esi
17015 CFI_RESTORE esi
17016 popl_cfi %edi
17017@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17018 CFI_RESTORE ebx
17019 ret
17020 CFI_ENDPROC
17021-ENDPROC(csum_partial_copy_generic)
17022+ENDPROC(csum_partial_copy_generic_to_user)
17023
17024 #undef ROUND
17025 #undef ROUND1
17026diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17027--- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17028+++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17029@@ -43,7 +43,7 @@ ENDPROC(clear_page)
17030
17031 #include <asm/cpufeature.h>
17032
17033- .section .altinstr_replacement,"ax"
17034+ .section .altinstr_replacement,"a"
17035 1: .byte 0xeb /* jmp <disp8> */
17036 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17037 2:
17038diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17039--- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17040+++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17041@@ -104,7 +104,7 @@ ENDPROC(copy_page)
17042
17043 #include <asm/cpufeature.h>
17044
17045- .section .altinstr_replacement,"ax"
17046+ .section .altinstr_replacement,"a"
17047 1: .byte 0xeb /* jmp <disp8> */
17048 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17049 2:
17050diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17051--- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17052+++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17053@@ -15,13 +15,14 @@
17054 #include <asm/asm-offsets.h>
17055 #include <asm/thread_info.h>
17056 #include <asm/cpufeature.h>
17057+#include <asm/pgtable.h>
17058
17059 .macro ALTERNATIVE_JUMP feature,orig,alt
17060 0:
17061 .byte 0xe9 /* 32bit jump */
17062 .long \orig-1f /* by default jump to orig */
17063 1:
17064- .section .altinstr_replacement,"ax"
17065+ .section .altinstr_replacement,"a"
17066 2: .byte 0xe9 /* near jump with 32bit immediate */
17067 .long \alt-1b /* offset */ /* or alternatively to alt */
17068 .previous
17069@@ -64,37 +65,13 @@
17070 #endif
17071 .endm
17072
17073-/* Standard copy_to_user with segment limit checking */
17074-ENTRY(_copy_to_user)
17075- CFI_STARTPROC
17076- GET_THREAD_INFO(%rax)
17077- movq %rdi,%rcx
17078- addq %rdx,%rcx
17079- jc bad_to_user
17080- cmpq TI_addr_limit(%rax),%rcx
17081- ja bad_to_user
17082- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17083- CFI_ENDPROC
17084-ENDPROC(_copy_to_user)
17085-
17086-/* Standard copy_from_user with segment limit checking */
17087-ENTRY(_copy_from_user)
17088- CFI_STARTPROC
17089- GET_THREAD_INFO(%rax)
17090- movq %rsi,%rcx
17091- addq %rdx,%rcx
17092- jc bad_from_user
17093- cmpq TI_addr_limit(%rax),%rcx
17094- ja bad_from_user
17095- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17096- CFI_ENDPROC
17097-ENDPROC(_copy_from_user)
17098-
17099 .section .fixup,"ax"
17100 /* must zero dest */
17101 ENTRY(bad_from_user)
17102 bad_from_user:
17103 CFI_STARTPROC
17104+ testl %edx,%edx
17105+ js bad_to_user
17106 movl %edx,%ecx
17107 xorl %eax,%eax
17108 rep
17109diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17110--- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17111+++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17112@@ -14,6 +14,7 @@
17113 #include <asm/current.h>
17114 #include <asm/asm-offsets.h>
17115 #include <asm/thread_info.h>
17116+#include <asm/pgtable.h>
17117
17118 .macro ALIGN_DESTINATION
17119 #ifdef FIX_ALIGNMENT
17120@@ -50,6 +51,15 @@
17121 */
17122 ENTRY(__copy_user_nocache)
17123 CFI_STARTPROC
17124+
17125+#ifdef CONFIG_PAX_MEMORY_UDEREF
17126+ mov $PAX_USER_SHADOW_BASE,%rcx
17127+ cmp %rcx,%rsi
17128+ jae 1f
17129+ add %rcx,%rsi
17130+1:
17131+#endif
17132+
17133 cmpl $8,%edx
17134 jb 20f /* less then 8 bytes, go to byte copy loop */
17135 ALIGN_DESTINATION
17136diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17137--- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17138+++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17139@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17140 len -= 2;
17141 }
17142 }
17143+
17144+#ifdef CONFIG_PAX_MEMORY_UDEREF
17145+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17146+ src += PAX_USER_SHADOW_BASE;
17147+#endif
17148+
17149 isum = csum_partial_copy_generic((__force const void *)src,
17150 dst, len, isum, errp, NULL);
17151 if (unlikely(*errp))
17152@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17153 }
17154
17155 *errp = 0;
17156+
17157+#ifdef CONFIG_PAX_MEMORY_UDEREF
17158+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17159+ dst += PAX_USER_SHADOW_BASE;
17160+#endif
17161+
17162 return csum_partial_copy_generic(src, (void __force *)dst,
17163 len, isum, NULL, errp);
17164 }
17165diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17166--- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17167+++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17168@@ -33,14 +33,35 @@
17169 #include <asm/asm-offsets.h>
17170 #include <asm/thread_info.h>
17171 #include <asm/asm.h>
17172+#include <asm/segment.h>
17173+#include <asm/pgtable.h>
17174+
17175+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17176+#define __copyuser_seg gs;
17177+#else
17178+#define __copyuser_seg
17179+#endif
17180
17181 .text
17182 ENTRY(__get_user_1)
17183 CFI_STARTPROC
17184+
17185+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17186 GET_THREAD_INFO(%_ASM_DX)
17187 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17188 jae bad_get_user
17189-1: movzb (%_ASM_AX),%edx
17190+
17191+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17192+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17193+ cmp %_ASM_DX,%_ASM_AX
17194+ jae 1234f
17195+ add %_ASM_DX,%_ASM_AX
17196+1234:
17197+#endif
17198+
17199+#endif
17200+
17201+1: __copyuser_seg movzb (%_ASM_AX),%edx
17202 xor %eax,%eax
17203 ret
17204 CFI_ENDPROC
17205@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17206 ENTRY(__get_user_2)
17207 CFI_STARTPROC
17208 add $1,%_ASM_AX
17209+
17210+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17211 jc bad_get_user
17212 GET_THREAD_INFO(%_ASM_DX)
17213 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17214 jae bad_get_user
17215-2: movzwl -1(%_ASM_AX),%edx
17216+
17217+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17218+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17219+ cmp %_ASM_DX,%_ASM_AX
17220+ jae 1234f
17221+ add %_ASM_DX,%_ASM_AX
17222+1234:
17223+#endif
17224+
17225+#endif
17226+
17227+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17228 xor %eax,%eax
17229 ret
17230 CFI_ENDPROC
17231@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17232 ENTRY(__get_user_4)
17233 CFI_STARTPROC
17234 add $3,%_ASM_AX
17235+
17236+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17237 jc bad_get_user
17238 GET_THREAD_INFO(%_ASM_DX)
17239 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17240 jae bad_get_user
17241-3: mov -3(%_ASM_AX),%edx
17242+
17243+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17244+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17245+ cmp %_ASM_DX,%_ASM_AX
17246+ jae 1234f
17247+ add %_ASM_DX,%_ASM_AX
17248+1234:
17249+#endif
17250+
17251+#endif
17252+
17253+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17254 xor %eax,%eax
17255 ret
17256 CFI_ENDPROC
17257@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17258 GET_THREAD_INFO(%_ASM_DX)
17259 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17260 jae bad_get_user
17261+
17262+#ifdef CONFIG_PAX_MEMORY_UDEREF
17263+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17264+ cmp %_ASM_DX,%_ASM_AX
17265+ jae 1234f
17266+ add %_ASM_DX,%_ASM_AX
17267+1234:
17268+#endif
17269+
17270 4: movq -7(%_ASM_AX),%_ASM_DX
17271 xor %eax,%eax
17272 ret
17273diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17274--- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17275+++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17276@@ -21,6 +21,11 @@
17277 #include <linux/string.h>
17278 #include <asm/inat.h>
17279 #include <asm/insn.h>
17280+#ifdef __KERNEL__
17281+#include <asm/pgtable_types.h>
17282+#else
17283+#define ktla_ktva(addr) addr
17284+#endif
17285
17286 #define get_next(t, insn) \
17287 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17288@@ -40,8 +45,8 @@
17289 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17290 {
17291 memset(insn, 0, sizeof(*insn));
17292- insn->kaddr = kaddr;
17293- insn->next_byte = kaddr;
17294+ insn->kaddr = ktla_ktva(kaddr);
17295+ insn->next_byte = ktla_ktva(kaddr);
17296 insn->x86_64 = x86_64 ? 1 : 0;
17297 insn->opnd_bytes = 4;
17298 if (x86_64)
17299diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17300--- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17301+++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17302@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17303 {
17304 void *p;
17305 int i;
17306+ unsigned long cr0;
17307
17308 if (unlikely(in_interrupt()))
17309 return __memcpy(to, from, len);
17310@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17311 kernel_fpu_begin();
17312
17313 __asm__ __volatile__ (
17314- "1: prefetch (%0)\n" /* This set is 28 bytes */
17315- " prefetch 64(%0)\n"
17316- " prefetch 128(%0)\n"
17317- " prefetch 192(%0)\n"
17318- " prefetch 256(%0)\n"
17319+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17320+ " prefetch 64(%1)\n"
17321+ " prefetch 128(%1)\n"
17322+ " prefetch 192(%1)\n"
17323+ " prefetch 256(%1)\n"
17324 "2: \n"
17325 ".section .fixup, \"ax\"\n"
17326- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17327+ "3: \n"
17328+
17329+#ifdef CONFIG_PAX_KERNEXEC
17330+ " movl %%cr0, %0\n"
17331+ " movl %0, %%eax\n"
17332+ " andl $0xFFFEFFFF, %%eax\n"
17333+ " movl %%eax, %%cr0\n"
17334+#endif
17335+
17336+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17337+
17338+#ifdef CONFIG_PAX_KERNEXEC
17339+ " movl %0, %%cr0\n"
17340+#endif
17341+
17342 " jmp 2b\n"
17343 ".previous\n"
17344 _ASM_EXTABLE(1b, 3b)
17345- : : "r" (from));
17346+ : "=&r" (cr0) : "r" (from) : "ax");
17347
17348 for ( ; i > 5; i--) {
17349 __asm__ __volatile__ (
17350- "1: prefetch 320(%0)\n"
17351- "2: movq (%0), %%mm0\n"
17352- " movq 8(%0), %%mm1\n"
17353- " movq 16(%0), %%mm2\n"
17354- " movq 24(%0), %%mm3\n"
17355- " movq %%mm0, (%1)\n"
17356- " movq %%mm1, 8(%1)\n"
17357- " movq %%mm2, 16(%1)\n"
17358- " movq %%mm3, 24(%1)\n"
17359- " movq 32(%0), %%mm0\n"
17360- " movq 40(%0), %%mm1\n"
17361- " movq 48(%0), %%mm2\n"
17362- " movq 56(%0), %%mm3\n"
17363- " movq %%mm0, 32(%1)\n"
17364- " movq %%mm1, 40(%1)\n"
17365- " movq %%mm2, 48(%1)\n"
17366- " movq %%mm3, 56(%1)\n"
17367+ "1: prefetch 320(%1)\n"
17368+ "2: movq (%1), %%mm0\n"
17369+ " movq 8(%1), %%mm1\n"
17370+ " movq 16(%1), %%mm2\n"
17371+ " movq 24(%1), %%mm3\n"
17372+ " movq %%mm0, (%2)\n"
17373+ " movq %%mm1, 8(%2)\n"
17374+ " movq %%mm2, 16(%2)\n"
17375+ " movq %%mm3, 24(%2)\n"
17376+ " movq 32(%1), %%mm0\n"
17377+ " movq 40(%1), %%mm1\n"
17378+ " movq 48(%1), %%mm2\n"
17379+ " movq 56(%1), %%mm3\n"
17380+ " movq %%mm0, 32(%2)\n"
17381+ " movq %%mm1, 40(%2)\n"
17382+ " movq %%mm2, 48(%2)\n"
17383+ " movq %%mm3, 56(%2)\n"
17384 ".section .fixup, \"ax\"\n"
17385- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17386+ "3:\n"
17387+
17388+#ifdef CONFIG_PAX_KERNEXEC
17389+ " movl %%cr0, %0\n"
17390+ " movl %0, %%eax\n"
17391+ " andl $0xFFFEFFFF, %%eax\n"
17392+ " movl %%eax, %%cr0\n"
17393+#endif
17394+
17395+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17396+
17397+#ifdef CONFIG_PAX_KERNEXEC
17398+ " movl %0, %%cr0\n"
17399+#endif
17400+
17401 " jmp 2b\n"
17402 ".previous\n"
17403 _ASM_EXTABLE(1b, 3b)
17404- : : "r" (from), "r" (to) : "memory");
17405+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17406
17407 from += 64;
17408 to += 64;
17409@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17410 static void fast_copy_page(void *to, void *from)
17411 {
17412 int i;
17413+ unsigned long cr0;
17414
17415 kernel_fpu_begin();
17416
17417@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17418 * but that is for later. -AV
17419 */
17420 __asm__ __volatile__(
17421- "1: prefetch (%0)\n"
17422- " prefetch 64(%0)\n"
17423- " prefetch 128(%0)\n"
17424- " prefetch 192(%0)\n"
17425- " prefetch 256(%0)\n"
17426+ "1: prefetch (%1)\n"
17427+ " prefetch 64(%1)\n"
17428+ " prefetch 128(%1)\n"
17429+ " prefetch 192(%1)\n"
17430+ " prefetch 256(%1)\n"
17431 "2: \n"
17432 ".section .fixup, \"ax\"\n"
17433- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17434+ "3: \n"
17435+
17436+#ifdef CONFIG_PAX_KERNEXEC
17437+ " movl %%cr0, %0\n"
17438+ " movl %0, %%eax\n"
17439+ " andl $0xFFFEFFFF, %%eax\n"
17440+ " movl %%eax, %%cr0\n"
17441+#endif
17442+
17443+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17444+
17445+#ifdef CONFIG_PAX_KERNEXEC
17446+ " movl %0, %%cr0\n"
17447+#endif
17448+
17449 " jmp 2b\n"
17450 ".previous\n"
17451- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17452+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17453
17454 for (i = 0; i < (4096-320)/64; i++) {
17455 __asm__ __volatile__ (
17456- "1: prefetch 320(%0)\n"
17457- "2: movq (%0), %%mm0\n"
17458- " movntq %%mm0, (%1)\n"
17459- " movq 8(%0), %%mm1\n"
17460- " movntq %%mm1, 8(%1)\n"
17461- " movq 16(%0), %%mm2\n"
17462- " movntq %%mm2, 16(%1)\n"
17463- " movq 24(%0), %%mm3\n"
17464- " movntq %%mm3, 24(%1)\n"
17465- " movq 32(%0), %%mm4\n"
17466- " movntq %%mm4, 32(%1)\n"
17467- " movq 40(%0), %%mm5\n"
17468- " movntq %%mm5, 40(%1)\n"
17469- " movq 48(%0), %%mm6\n"
17470- " movntq %%mm6, 48(%1)\n"
17471- " movq 56(%0), %%mm7\n"
17472- " movntq %%mm7, 56(%1)\n"
17473+ "1: prefetch 320(%1)\n"
17474+ "2: movq (%1), %%mm0\n"
17475+ " movntq %%mm0, (%2)\n"
17476+ " movq 8(%1), %%mm1\n"
17477+ " movntq %%mm1, 8(%2)\n"
17478+ " movq 16(%1), %%mm2\n"
17479+ " movntq %%mm2, 16(%2)\n"
17480+ " movq 24(%1), %%mm3\n"
17481+ " movntq %%mm3, 24(%2)\n"
17482+ " movq 32(%1), %%mm4\n"
17483+ " movntq %%mm4, 32(%2)\n"
17484+ " movq 40(%1), %%mm5\n"
17485+ " movntq %%mm5, 40(%2)\n"
17486+ " movq 48(%1), %%mm6\n"
17487+ " movntq %%mm6, 48(%2)\n"
17488+ " movq 56(%1), %%mm7\n"
17489+ " movntq %%mm7, 56(%2)\n"
17490 ".section .fixup, \"ax\"\n"
17491- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17492+ "3:\n"
17493+
17494+#ifdef CONFIG_PAX_KERNEXEC
17495+ " movl %%cr0, %0\n"
17496+ " movl %0, %%eax\n"
17497+ " andl $0xFFFEFFFF, %%eax\n"
17498+ " movl %%eax, %%cr0\n"
17499+#endif
17500+
17501+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17502+
17503+#ifdef CONFIG_PAX_KERNEXEC
17504+ " movl %0, %%cr0\n"
17505+#endif
17506+
17507 " jmp 2b\n"
17508 ".previous\n"
17509- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17510+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17511
17512 from += 64;
17513 to += 64;
17514@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17515 static void fast_copy_page(void *to, void *from)
17516 {
17517 int i;
17518+ unsigned long cr0;
17519
17520 kernel_fpu_begin();
17521
17522 __asm__ __volatile__ (
17523- "1: prefetch (%0)\n"
17524- " prefetch 64(%0)\n"
17525- " prefetch 128(%0)\n"
17526- " prefetch 192(%0)\n"
17527- " prefetch 256(%0)\n"
17528+ "1: prefetch (%1)\n"
17529+ " prefetch 64(%1)\n"
17530+ " prefetch 128(%1)\n"
17531+ " prefetch 192(%1)\n"
17532+ " prefetch 256(%1)\n"
17533 "2: \n"
17534 ".section .fixup, \"ax\"\n"
17535- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17536+ "3: \n"
17537+
17538+#ifdef CONFIG_PAX_KERNEXEC
17539+ " movl %%cr0, %0\n"
17540+ " movl %0, %%eax\n"
17541+ " andl $0xFFFEFFFF, %%eax\n"
17542+ " movl %%eax, %%cr0\n"
17543+#endif
17544+
17545+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17546+
17547+#ifdef CONFIG_PAX_KERNEXEC
17548+ " movl %0, %%cr0\n"
17549+#endif
17550+
17551 " jmp 2b\n"
17552 ".previous\n"
17553- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17554+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17555
17556 for (i = 0; i < 4096/64; i++) {
17557 __asm__ __volatile__ (
17558- "1: prefetch 320(%0)\n"
17559- "2: movq (%0), %%mm0\n"
17560- " movq 8(%0), %%mm1\n"
17561- " movq 16(%0), %%mm2\n"
17562- " movq 24(%0), %%mm3\n"
17563- " movq %%mm0, (%1)\n"
17564- " movq %%mm1, 8(%1)\n"
17565- " movq %%mm2, 16(%1)\n"
17566- " movq %%mm3, 24(%1)\n"
17567- " movq 32(%0), %%mm0\n"
17568- " movq 40(%0), %%mm1\n"
17569- " movq 48(%0), %%mm2\n"
17570- " movq 56(%0), %%mm3\n"
17571- " movq %%mm0, 32(%1)\n"
17572- " movq %%mm1, 40(%1)\n"
17573- " movq %%mm2, 48(%1)\n"
17574- " movq %%mm3, 56(%1)\n"
17575+ "1: prefetch 320(%1)\n"
17576+ "2: movq (%1), %%mm0\n"
17577+ " movq 8(%1), %%mm1\n"
17578+ " movq 16(%1), %%mm2\n"
17579+ " movq 24(%1), %%mm3\n"
17580+ " movq %%mm0, (%2)\n"
17581+ " movq %%mm1, 8(%2)\n"
17582+ " movq %%mm2, 16(%2)\n"
17583+ " movq %%mm3, 24(%2)\n"
17584+ " movq 32(%1), %%mm0\n"
17585+ " movq 40(%1), %%mm1\n"
17586+ " movq 48(%1), %%mm2\n"
17587+ " movq 56(%1), %%mm3\n"
17588+ " movq %%mm0, 32(%2)\n"
17589+ " movq %%mm1, 40(%2)\n"
17590+ " movq %%mm2, 48(%2)\n"
17591+ " movq %%mm3, 56(%2)\n"
17592 ".section .fixup, \"ax\"\n"
17593- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17594+ "3:\n"
17595+
17596+#ifdef CONFIG_PAX_KERNEXEC
17597+ " movl %%cr0, %0\n"
17598+ " movl %0, %%eax\n"
17599+ " andl $0xFFFEFFFF, %%eax\n"
17600+ " movl %%eax, %%cr0\n"
17601+#endif
17602+
17603+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17604+
17605+#ifdef CONFIG_PAX_KERNEXEC
17606+ " movl %0, %%cr0\n"
17607+#endif
17608+
17609 " jmp 2b\n"
17610 ".previous\n"
17611 _ASM_EXTABLE(1b, 3b)
17612- : : "r" (from), "r" (to) : "memory");
17613+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17614
17615 from += 64;
17616 to += 64;
17617diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17618--- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17619+++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17620@@ -15,7 +15,8 @@
17621 #include <asm/thread_info.h>
17622 #include <asm/errno.h>
17623 #include <asm/asm.h>
17624-
17625+#include <asm/segment.h>
17626+#include <asm/pgtable.h>
17627
17628 /*
17629 * __put_user_X
17630@@ -29,52 +30,119 @@
17631 * as they get called from within inline assembly.
17632 */
17633
17634-#define ENTER CFI_STARTPROC ; \
17635- GET_THREAD_INFO(%_ASM_BX)
17636+#define ENTER CFI_STARTPROC
17637 #define EXIT ret ; \
17638 CFI_ENDPROC
17639
17640+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17641+#define _DEST %_ASM_CX,%_ASM_BX
17642+#else
17643+#define _DEST %_ASM_CX
17644+#endif
17645+
17646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17647+#define __copyuser_seg gs;
17648+#else
17649+#define __copyuser_seg
17650+#endif
17651+
17652 .text
17653 ENTRY(__put_user_1)
17654 ENTER
17655+
17656+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17657+ GET_THREAD_INFO(%_ASM_BX)
17658 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17659 jae bad_put_user
17660-1: movb %al,(%_ASM_CX)
17661+
17662+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17663+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17664+ cmp %_ASM_BX,%_ASM_CX
17665+ jb 1234f
17666+ xor %ebx,%ebx
17667+1234:
17668+#endif
17669+
17670+#endif
17671+
17672+1: __copyuser_seg movb %al,(_DEST)
17673 xor %eax,%eax
17674 EXIT
17675 ENDPROC(__put_user_1)
17676
17677 ENTRY(__put_user_2)
17678 ENTER
17679+
17680+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17681+ GET_THREAD_INFO(%_ASM_BX)
17682 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17683 sub $1,%_ASM_BX
17684 cmp %_ASM_BX,%_ASM_CX
17685 jae bad_put_user
17686-2: movw %ax,(%_ASM_CX)
17687+
17688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17689+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17690+ cmp %_ASM_BX,%_ASM_CX
17691+ jb 1234f
17692+ xor %ebx,%ebx
17693+1234:
17694+#endif
17695+
17696+#endif
17697+
17698+2: __copyuser_seg movw %ax,(_DEST)
17699 xor %eax,%eax
17700 EXIT
17701 ENDPROC(__put_user_2)
17702
17703 ENTRY(__put_user_4)
17704 ENTER
17705+
17706+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17707+ GET_THREAD_INFO(%_ASM_BX)
17708 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17709 sub $3,%_ASM_BX
17710 cmp %_ASM_BX,%_ASM_CX
17711 jae bad_put_user
17712-3: movl %eax,(%_ASM_CX)
17713+
17714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17715+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17716+ cmp %_ASM_BX,%_ASM_CX
17717+ jb 1234f
17718+ xor %ebx,%ebx
17719+1234:
17720+#endif
17721+
17722+#endif
17723+
17724+3: __copyuser_seg movl %eax,(_DEST)
17725 xor %eax,%eax
17726 EXIT
17727 ENDPROC(__put_user_4)
17728
17729 ENTRY(__put_user_8)
17730 ENTER
17731+
17732+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17733+ GET_THREAD_INFO(%_ASM_BX)
17734 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17735 sub $7,%_ASM_BX
17736 cmp %_ASM_BX,%_ASM_CX
17737 jae bad_put_user
17738-4: mov %_ASM_AX,(%_ASM_CX)
17739+
17740+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17741+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17742+ cmp %_ASM_BX,%_ASM_CX
17743+ jb 1234f
17744+ xor %ebx,%ebx
17745+1234:
17746+#endif
17747+
17748+#endif
17749+
17750+4: __copyuser_seg mov %_ASM_AX,(_DEST)
17751 #ifdef CONFIG_X86_32
17752-5: movl %edx,4(%_ASM_CX)
17753+5: __copyuser_seg movl %edx,4(_DEST)
17754 #endif
17755 xor %eax,%eax
17756 EXIT
17757diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17758--- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17759+++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17760@@ -43,7 +43,7 @@ do { \
17761 __asm__ __volatile__( \
17762 " testl %1,%1\n" \
17763 " jz 2f\n" \
17764- "0: lodsb\n" \
17765+ "0: "__copyuser_seg"lodsb\n" \
17766 " stosb\n" \
17767 " testb %%al,%%al\n" \
17768 " jz 1f\n" \
17769@@ -128,10 +128,12 @@ do { \
17770 int __d0; \
17771 might_fault(); \
17772 __asm__ __volatile__( \
17773+ __COPYUSER_SET_ES \
17774 "0: rep; stosl\n" \
17775 " movl %2,%0\n" \
17776 "1: rep; stosb\n" \
17777 "2:\n" \
17778+ __COPYUSER_RESTORE_ES \
17779 ".section .fixup,\"ax\"\n" \
17780 "3: lea 0(%2,%0,4),%0\n" \
17781 " jmp 2b\n" \
17782@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17783 might_fault();
17784
17785 __asm__ __volatile__(
17786+ __COPYUSER_SET_ES
17787 " testl %0, %0\n"
17788 " jz 3f\n"
17789 " andl %0,%%ecx\n"
17790@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17791 " subl %%ecx,%0\n"
17792 " addl %0,%%eax\n"
17793 "1:\n"
17794+ __COPYUSER_RESTORE_ES
17795 ".section .fixup,\"ax\"\n"
17796 "2: xorl %%eax,%%eax\n"
17797 " jmp 1b\n"
17798@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17799
17800 #ifdef CONFIG_X86_INTEL_USERCOPY
17801 static unsigned long
17802-__copy_user_intel(void __user *to, const void *from, unsigned long size)
17803+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17804 {
17805 int d0, d1;
17806 __asm__ __volatile__(
17807@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17808 " .align 2,0x90\n"
17809 "3: movl 0(%4), %%eax\n"
17810 "4: movl 4(%4), %%edx\n"
17811- "5: movl %%eax, 0(%3)\n"
17812- "6: movl %%edx, 4(%3)\n"
17813+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17814+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17815 "7: movl 8(%4), %%eax\n"
17816 "8: movl 12(%4),%%edx\n"
17817- "9: movl %%eax, 8(%3)\n"
17818- "10: movl %%edx, 12(%3)\n"
17819+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17820+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17821 "11: movl 16(%4), %%eax\n"
17822 "12: movl 20(%4), %%edx\n"
17823- "13: movl %%eax, 16(%3)\n"
17824- "14: movl %%edx, 20(%3)\n"
17825+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17826+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17827 "15: movl 24(%4), %%eax\n"
17828 "16: movl 28(%4), %%edx\n"
17829- "17: movl %%eax, 24(%3)\n"
17830- "18: movl %%edx, 28(%3)\n"
17831+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17832+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17833 "19: movl 32(%4), %%eax\n"
17834 "20: movl 36(%4), %%edx\n"
17835- "21: movl %%eax, 32(%3)\n"
17836- "22: movl %%edx, 36(%3)\n"
17837+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17838+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17839 "23: movl 40(%4), %%eax\n"
17840 "24: movl 44(%4), %%edx\n"
17841- "25: movl %%eax, 40(%3)\n"
17842- "26: movl %%edx, 44(%3)\n"
17843+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17844+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17845 "27: movl 48(%4), %%eax\n"
17846 "28: movl 52(%4), %%edx\n"
17847- "29: movl %%eax, 48(%3)\n"
17848- "30: movl %%edx, 52(%3)\n"
17849+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17850+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17851 "31: movl 56(%4), %%eax\n"
17852 "32: movl 60(%4), %%edx\n"
17853- "33: movl %%eax, 56(%3)\n"
17854- "34: movl %%edx, 60(%3)\n"
17855+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17856+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17857 " addl $-64, %0\n"
17858 " addl $64, %4\n"
17859 " addl $64, %3\n"
17860@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17861 " shrl $2, %0\n"
17862 " andl $3, %%eax\n"
17863 " cld\n"
17864+ __COPYUSER_SET_ES
17865 "99: rep; movsl\n"
17866 "36: movl %%eax, %0\n"
17867 "37: rep; movsb\n"
17868 "100:\n"
17869+ __COPYUSER_RESTORE_ES
17870+ ".section .fixup,\"ax\"\n"
17871+ "101: lea 0(%%eax,%0,4),%0\n"
17872+ " jmp 100b\n"
17873+ ".previous\n"
17874+ ".section __ex_table,\"a\"\n"
17875+ " .align 4\n"
17876+ " .long 1b,100b\n"
17877+ " .long 2b,100b\n"
17878+ " .long 3b,100b\n"
17879+ " .long 4b,100b\n"
17880+ " .long 5b,100b\n"
17881+ " .long 6b,100b\n"
17882+ " .long 7b,100b\n"
17883+ " .long 8b,100b\n"
17884+ " .long 9b,100b\n"
17885+ " .long 10b,100b\n"
17886+ " .long 11b,100b\n"
17887+ " .long 12b,100b\n"
17888+ " .long 13b,100b\n"
17889+ " .long 14b,100b\n"
17890+ " .long 15b,100b\n"
17891+ " .long 16b,100b\n"
17892+ " .long 17b,100b\n"
17893+ " .long 18b,100b\n"
17894+ " .long 19b,100b\n"
17895+ " .long 20b,100b\n"
17896+ " .long 21b,100b\n"
17897+ " .long 22b,100b\n"
17898+ " .long 23b,100b\n"
17899+ " .long 24b,100b\n"
17900+ " .long 25b,100b\n"
17901+ " .long 26b,100b\n"
17902+ " .long 27b,100b\n"
17903+ " .long 28b,100b\n"
17904+ " .long 29b,100b\n"
17905+ " .long 30b,100b\n"
17906+ " .long 31b,100b\n"
17907+ " .long 32b,100b\n"
17908+ " .long 33b,100b\n"
17909+ " .long 34b,100b\n"
17910+ " .long 35b,100b\n"
17911+ " .long 36b,100b\n"
17912+ " .long 37b,100b\n"
17913+ " .long 99b,101b\n"
17914+ ".previous"
17915+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
17916+ : "1"(to), "2"(from), "0"(size)
17917+ : "eax", "edx", "memory");
17918+ return size;
17919+}
17920+
17921+static unsigned long
17922+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17923+{
17924+ int d0, d1;
17925+ __asm__ __volatile__(
17926+ " .align 2,0x90\n"
17927+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17928+ " cmpl $67, %0\n"
17929+ " jbe 3f\n"
17930+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17931+ " .align 2,0x90\n"
17932+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17933+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17934+ "5: movl %%eax, 0(%3)\n"
17935+ "6: movl %%edx, 4(%3)\n"
17936+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17937+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17938+ "9: movl %%eax, 8(%3)\n"
17939+ "10: movl %%edx, 12(%3)\n"
17940+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17941+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17942+ "13: movl %%eax, 16(%3)\n"
17943+ "14: movl %%edx, 20(%3)\n"
17944+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17945+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17946+ "17: movl %%eax, 24(%3)\n"
17947+ "18: movl %%edx, 28(%3)\n"
17948+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17949+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17950+ "21: movl %%eax, 32(%3)\n"
17951+ "22: movl %%edx, 36(%3)\n"
17952+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17953+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17954+ "25: movl %%eax, 40(%3)\n"
17955+ "26: movl %%edx, 44(%3)\n"
17956+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17957+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17958+ "29: movl %%eax, 48(%3)\n"
17959+ "30: movl %%edx, 52(%3)\n"
17960+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17961+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17962+ "33: movl %%eax, 56(%3)\n"
17963+ "34: movl %%edx, 60(%3)\n"
17964+ " addl $-64, %0\n"
17965+ " addl $64, %4\n"
17966+ " addl $64, %3\n"
17967+ " cmpl $63, %0\n"
17968+ " ja 1b\n"
17969+ "35: movl %0, %%eax\n"
17970+ " shrl $2, %0\n"
17971+ " andl $3, %%eax\n"
17972+ " cld\n"
17973+ "99: rep; "__copyuser_seg" movsl\n"
17974+ "36: movl %%eax, %0\n"
17975+ "37: rep; "__copyuser_seg" movsb\n"
17976+ "100:\n"
17977 ".section .fixup,\"ax\"\n"
17978 "101: lea 0(%%eax,%0,4),%0\n"
17979 " jmp 100b\n"
17980@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17981 int d0, d1;
17982 __asm__ __volatile__(
17983 " .align 2,0x90\n"
17984- "0: movl 32(%4), %%eax\n"
17985+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17986 " cmpl $67, %0\n"
17987 " jbe 2f\n"
17988- "1: movl 64(%4), %%eax\n"
17989+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17990 " .align 2,0x90\n"
17991- "2: movl 0(%4), %%eax\n"
17992- "21: movl 4(%4), %%edx\n"
17993+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17994+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17995 " movl %%eax, 0(%3)\n"
17996 " movl %%edx, 4(%3)\n"
17997- "3: movl 8(%4), %%eax\n"
17998- "31: movl 12(%4),%%edx\n"
17999+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18000+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18001 " movl %%eax, 8(%3)\n"
18002 " movl %%edx, 12(%3)\n"
18003- "4: movl 16(%4), %%eax\n"
18004- "41: movl 20(%4), %%edx\n"
18005+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18006+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18007 " movl %%eax, 16(%3)\n"
18008 " movl %%edx, 20(%3)\n"
18009- "10: movl 24(%4), %%eax\n"
18010- "51: movl 28(%4), %%edx\n"
18011+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18012+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18013 " movl %%eax, 24(%3)\n"
18014 " movl %%edx, 28(%3)\n"
18015- "11: movl 32(%4), %%eax\n"
18016- "61: movl 36(%4), %%edx\n"
18017+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18018+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18019 " movl %%eax, 32(%3)\n"
18020 " movl %%edx, 36(%3)\n"
18021- "12: movl 40(%4), %%eax\n"
18022- "71: movl 44(%4), %%edx\n"
18023+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18024+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18025 " movl %%eax, 40(%3)\n"
18026 " movl %%edx, 44(%3)\n"
18027- "13: movl 48(%4), %%eax\n"
18028- "81: movl 52(%4), %%edx\n"
18029+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18030+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18031 " movl %%eax, 48(%3)\n"
18032 " movl %%edx, 52(%3)\n"
18033- "14: movl 56(%4), %%eax\n"
18034- "91: movl 60(%4), %%edx\n"
18035+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18036+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18037 " movl %%eax, 56(%3)\n"
18038 " movl %%edx, 60(%3)\n"
18039 " addl $-64, %0\n"
18040@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18041 " shrl $2, %0\n"
18042 " andl $3, %%eax\n"
18043 " cld\n"
18044- "6: rep; movsl\n"
18045+ "6: rep; "__copyuser_seg" movsl\n"
18046 " movl %%eax,%0\n"
18047- "7: rep; movsb\n"
18048+ "7: rep; "__copyuser_seg" movsb\n"
18049 "8:\n"
18050 ".section .fixup,\"ax\"\n"
18051 "9: lea 0(%%eax,%0,4),%0\n"
18052@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18053
18054 __asm__ __volatile__(
18055 " .align 2,0x90\n"
18056- "0: movl 32(%4), %%eax\n"
18057+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18058 " cmpl $67, %0\n"
18059 " jbe 2f\n"
18060- "1: movl 64(%4), %%eax\n"
18061+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18062 " .align 2,0x90\n"
18063- "2: movl 0(%4), %%eax\n"
18064- "21: movl 4(%4), %%edx\n"
18065+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18066+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18067 " movnti %%eax, 0(%3)\n"
18068 " movnti %%edx, 4(%3)\n"
18069- "3: movl 8(%4), %%eax\n"
18070- "31: movl 12(%4),%%edx\n"
18071+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18072+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18073 " movnti %%eax, 8(%3)\n"
18074 " movnti %%edx, 12(%3)\n"
18075- "4: movl 16(%4), %%eax\n"
18076- "41: movl 20(%4), %%edx\n"
18077+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18078+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18079 " movnti %%eax, 16(%3)\n"
18080 " movnti %%edx, 20(%3)\n"
18081- "10: movl 24(%4), %%eax\n"
18082- "51: movl 28(%4), %%edx\n"
18083+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18084+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18085 " movnti %%eax, 24(%3)\n"
18086 " movnti %%edx, 28(%3)\n"
18087- "11: movl 32(%4), %%eax\n"
18088- "61: movl 36(%4), %%edx\n"
18089+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18090+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18091 " movnti %%eax, 32(%3)\n"
18092 " movnti %%edx, 36(%3)\n"
18093- "12: movl 40(%4), %%eax\n"
18094- "71: movl 44(%4), %%edx\n"
18095+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18096+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18097 " movnti %%eax, 40(%3)\n"
18098 " movnti %%edx, 44(%3)\n"
18099- "13: movl 48(%4), %%eax\n"
18100- "81: movl 52(%4), %%edx\n"
18101+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18102+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18103 " movnti %%eax, 48(%3)\n"
18104 " movnti %%edx, 52(%3)\n"
18105- "14: movl 56(%4), %%eax\n"
18106- "91: movl 60(%4), %%edx\n"
18107+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18108+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18109 " movnti %%eax, 56(%3)\n"
18110 " movnti %%edx, 60(%3)\n"
18111 " addl $-64, %0\n"
18112@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18113 " shrl $2, %0\n"
18114 " andl $3, %%eax\n"
18115 " cld\n"
18116- "6: rep; movsl\n"
18117+ "6: rep; "__copyuser_seg" movsl\n"
18118 " movl %%eax,%0\n"
18119- "7: rep; movsb\n"
18120+ "7: rep; "__copyuser_seg" movsb\n"
18121 "8:\n"
18122 ".section .fixup,\"ax\"\n"
18123 "9: lea 0(%%eax,%0,4),%0\n"
18124@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18125
18126 __asm__ __volatile__(
18127 " .align 2,0x90\n"
18128- "0: movl 32(%4), %%eax\n"
18129+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18130 " cmpl $67, %0\n"
18131 " jbe 2f\n"
18132- "1: movl 64(%4), %%eax\n"
18133+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18134 " .align 2,0x90\n"
18135- "2: movl 0(%4), %%eax\n"
18136- "21: movl 4(%4), %%edx\n"
18137+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18138+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18139 " movnti %%eax, 0(%3)\n"
18140 " movnti %%edx, 4(%3)\n"
18141- "3: movl 8(%4), %%eax\n"
18142- "31: movl 12(%4),%%edx\n"
18143+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18144+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18145 " movnti %%eax, 8(%3)\n"
18146 " movnti %%edx, 12(%3)\n"
18147- "4: movl 16(%4), %%eax\n"
18148- "41: movl 20(%4), %%edx\n"
18149+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18150+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18151 " movnti %%eax, 16(%3)\n"
18152 " movnti %%edx, 20(%3)\n"
18153- "10: movl 24(%4), %%eax\n"
18154- "51: movl 28(%4), %%edx\n"
18155+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18156+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18157 " movnti %%eax, 24(%3)\n"
18158 " movnti %%edx, 28(%3)\n"
18159- "11: movl 32(%4), %%eax\n"
18160- "61: movl 36(%4), %%edx\n"
18161+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18162+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18163 " movnti %%eax, 32(%3)\n"
18164 " movnti %%edx, 36(%3)\n"
18165- "12: movl 40(%4), %%eax\n"
18166- "71: movl 44(%4), %%edx\n"
18167+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18168+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18169 " movnti %%eax, 40(%3)\n"
18170 " movnti %%edx, 44(%3)\n"
18171- "13: movl 48(%4), %%eax\n"
18172- "81: movl 52(%4), %%edx\n"
18173+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18174+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18175 " movnti %%eax, 48(%3)\n"
18176 " movnti %%edx, 52(%3)\n"
18177- "14: movl 56(%4), %%eax\n"
18178- "91: movl 60(%4), %%edx\n"
18179+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18180+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18181 " movnti %%eax, 56(%3)\n"
18182 " movnti %%edx, 60(%3)\n"
18183 " addl $-64, %0\n"
18184@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18185 " shrl $2, %0\n"
18186 " andl $3, %%eax\n"
18187 " cld\n"
18188- "6: rep; movsl\n"
18189+ "6: rep; "__copyuser_seg" movsl\n"
18190 " movl %%eax,%0\n"
18191- "7: rep; movsb\n"
18192+ "7: rep; "__copyuser_seg" movsb\n"
18193 "8:\n"
18194 ".section .fixup,\"ax\"\n"
18195 "9: lea 0(%%eax,%0,4),%0\n"
18196@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18197 */
18198 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18199 unsigned long size);
18200-unsigned long __copy_user_intel(void __user *to, const void *from,
18201+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18202+ unsigned long size);
18203+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18204 unsigned long size);
18205 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18206 const void __user *from, unsigned long size);
18207 #endif /* CONFIG_X86_INTEL_USERCOPY */
18208
18209 /* Generic arbitrary sized copy. */
18210-#define __copy_user(to, from, size) \
18211+#define __copy_user(to, from, size, prefix, set, restore) \
18212 do { \
18213 int __d0, __d1, __d2; \
18214 __asm__ __volatile__( \
18215+ set \
18216 " cmp $7,%0\n" \
18217 " jbe 1f\n" \
18218 " movl %1,%0\n" \
18219 " negl %0\n" \
18220 " andl $7,%0\n" \
18221 " subl %0,%3\n" \
18222- "4: rep; movsb\n" \
18223+ "4: rep; "prefix"movsb\n" \
18224 " movl %3,%0\n" \
18225 " shrl $2,%0\n" \
18226 " andl $3,%3\n" \
18227 " .align 2,0x90\n" \
18228- "0: rep; movsl\n" \
18229+ "0: rep; "prefix"movsl\n" \
18230 " movl %3,%0\n" \
18231- "1: rep; movsb\n" \
18232+ "1: rep; "prefix"movsb\n" \
18233 "2:\n" \
18234+ restore \
18235 ".section .fixup,\"ax\"\n" \
18236 "5: addl %3,%0\n" \
18237 " jmp 2b\n" \
18238@@ -682,14 +799,14 @@ do { \
18239 " negl %0\n" \
18240 " andl $7,%0\n" \
18241 " subl %0,%3\n" \
18242- "4: rep; movsb\n" \
18243+ "4: rep; "__copyuser_seg"movsb\n" \
18244 " movl %3,%0\n" \
18245 " shrl $2,%0\n" \
18246 " andl $3,%3\n" \
18247 " .align 2,0x90\n" \
18248- "0: rep; movsl\n" \
18249+ "0: rep; "__copyuser_seg"movsl\n" \
18250 " movl %3,%0\n" \
18251- "1: rep; movsb\n" \
18252+ "1: rep; "__copyuser_seg"movsb\n" \
18253 "2:\n" \
18254 ".section .fixup,\"ax\"\n" \
18255 "5: addl %3,%0\n" \
18256@@ -775,9 +892,9 @@ survive:
18257 }
18258 #endif
18259 if (movsl_is_ok(to, from, n))
18260- __copy_user(to, from, n);
18261+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18262 else
18263- n = __copy_user_intel(to, from, n);
18264+ n = __generic_copy_to_user_intel(to, from, n);
18265 return n;
18266 }
18267 EXPORT_SYMBOL(__copy_to_user_ll);
18268@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18269 unsigned long n)
18270 {
18271 if (movsl_is_ok(to, from, n))
18272- __copy_user(to, from, n);
18273+ __copy_user(to, from, n, __copyuser_seg, "", "");
18274 else
18275- n = __copy_user_intel((void __user *)to,
18276- (const void *)from, n);
18277+ n = __generic_copy_from_user_intel(to, from, n);
18278 return n;
18279 }
18280 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18281@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18282 if (n > 64 && cpu_has_xmm2)
18283 n = __copy_user_intel_nocache(to, from, n);
18284 else
18285- __copy_user(to, from, n);
18286+ __copy_user(to, from, n, __copyuser_seg, "", "");
18287 #else
18288- __copy_user(to, from, n);
18289+ __copy_user(to, from, n, __copyuser_seg, "", "");
18290 #endif
18291 return n;
18292 }
18293 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18294
18295-/**
18296- * copy_to_user: - Copy a block of data into user space.
18297- * @to: Destination address, in user space.
18298- * @from: Source address, in kernel space.
18299- * @n: Number of bytes to copy.
18300- *
18301- * Context: User context only. This function may sleep.
18302- *
18303- * Copy data from kernel space to user space.
18304- *
18305- * Returns number of bytes that could not be copied.
18306- * On success, this will be zero.
18307- */
18308-unsigned long
18309-copy_to_user(void __user *to, const void *from, unsigned long n)
18310+void copy_from_user_overflow(void)
18311 {
18312- if (access_ok(VERIFY_WRITE, to, n))
18313- n = __copy_to_user(to, from, n);
18314- return n;
18315+ WARN(1, "Buffer overflow detected!\n");
18316 }
18317-EXPORT_SYMBOL(copy_to_user);
18318+EXPORT_SYMBOL(copy_from_user_overflow);
18319
18320-/**
18321- * copy_from_user: - Copy a block of data from user space.
18322- * @to: Destination address, in kernel space.
18323- * @from: Source address, in user space.
18324- * @n: Number of bytes to copy.
18325- *
18326- * Context: User context only. This function may sleep.
18327- *
18328- * Copy data from user space to kernel space.
18329- *
18330- * Returns number of bytes that could not be copied.
18331- * On success, this will be zero.
18332- *
18333- * If some data could not be copied, this function will pad the copied
18334- * data to the requested size using zero bytes.
18335- */
18336-unsigned long
18337-_copy_from_user(void *to, const void __user *from, unsigned long n)
18338+void copy_to_user_overflow(void)
18339 {
18340- if (access_ok(VERIFY_READ, from, n))
18341- n = __copy_from_user(to, from, n);
18342- else
18343- memset(to, 0, n);
18344- return n;
18345+ WARN(1, "Buffer overflow detected!\n");
18346 }
18347-EXPORT_SYMBOL(_copy_from_user);
18348+EXPORT_SYMBOL(copy_to_user_overflow);
18349
18350-void copy_from_user_overflow(void)
18351+#ifdef CONFIG_PAX_MEMORY_UDEREF
18352+void __set_fs(mm_segment_t x)
18353 {
18354- WARN(1, "Buffer overflow detected!\n");
18355+ switch (x.seg) {
18356+ case 0:
18357+ loadsegment(gs, 0);
18358+ break;
18359+ case TASK_SIZE_MAX:
18360+ loadsegment(gs, __USER_DS);
18361+ break;
18362+ case -1UL:
18363+ loadsegment(gs, __KERNEL_DS);
18364+ break;
18365+ default:
18366+ BUG();
18367+ }
18368+ return;
18369 }
18370-EXPORT_SYMBOL(copy_from_user_overflow);
18371+EXPORT_SYMBOL(__set_fs);
18372+
18373+void set_fs(mm_segment_t x)
18374+{
18375+ current_thread_info()->addr_limit = x;
18376+ __set_fs(x);
18377+}
18378+EXPORT_SYMBOL(set_fs);
18379+#endif
18380diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18381--- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18382+++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18383@@ -42,6 +42,12 @@ long
18384 __strncpy_from_user(char *dst, const char __user *src, long count)
18385 {
18386 long res;
18387+
18388+#ifdef CONFIG_PAX_MEMORY_UDEREF
18389+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18390+ src += PAX_USER_SHADOW_BASE;
18391+#endif
18392+
18393 __do_strncpy_from_user(dst, src, count, res);
18394 return res;
18395 }
18396@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18397 {
18398 long __d0;
18399 might_fault();
18400+
18401+#ifdef CONFIG_PAX_MEMORY_UDEREF
18402+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18403+ addr += PAX_USER_SHADOW_BASE;
18404+#endif
18405+
18406 /* no memory constraint because it doesn't change any memory gcc knows
18407 about */
18408 asm volatile(
18409@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18410
18411 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18412 {
18413- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18414+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18415+
18416+#ifdef CONFIG_PAX_MEMORY_UDEREF
18417+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18418+ to += PAX_USER_SHADOW_BASE;
18419+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18420+ from += PAX_USER_SHADOW_BASE;
18421+#endif
18422+
18423 return copy_user_generic((__force void *)to, (__force void *)from, len);
18424- }
18425- return len;
18426+ }
18427+ return len;
18428 }
18429 EXPORT_SYMBOL(copy_in_user);
18430
18431diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18432--- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18433+++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18434@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18435 else
18436 BITS := 64
18437 UTS_MACHINE := x86_64
18438+ biarch := $(call cc-option,-m64)
18439 CHECKFLAGS += -D__x86_64__ -m64
18440
18441 KBUILD_AFLAGS += -m64
18442@@ -195,3 +196,12 @@ define archhelp
18443 echo ' FDARGS="..." arguments for the booted kernel'
18444 echo ' FDINITRD=file initrd for the booted kernel'
18445 endef
18446+
18447+define OLD_LD
18448+
18449+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18450+*** Please upgrade your binutils to 2.18 or newer
18451+endef
18452+
18453+archprepare:
18454+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18455diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18456--- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18457+++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18458@@ -1,14 +1,71 @@
18459 #include <linux/module.h>
18460 #include <linux/spinlock.h>
18461+#include <linux/sort.h>
18462 #include <asm/uaccess.h>
18463+#include <asm/pgtable.h>
18464
18465+/*
18466+ * The exception table needs to be sorted so that the binary
18467+ * search that we use to find entries in it works properly.
18468+ * This is used both for the kernel exception table and for
18469+ * the exception tables of modules that get loaded.
18470+ */
18471+static int cmp_ex(const void *a, const void *b)
18472+{
18473+ const struct exception_table_entry *x = a, *y = b;
18474+
18475+ /* avoid overflow */
18476+ if (x->insn > y->insn)
18477+ return 1;
18478+ if (x->insn < y->insn)
18479+ return -1;
18480+ return 0;
18481+}
18482+
18483+static void swap_ex(void *a, void *b, int size)
18484+{
18485+ struct exception_table_entry t, *x = a, *y = b;
18486+
18487+ t = *x;
18488+
18489+ pax_open_kernel();
18490+ *x = *y;
18491+ *y = t;
18492+ pax_close_kernel();
18493+}
18494+
18495+void sort_extable(struct exception_table_entry *start,
18496+ struct exception_table_entry *finish)
18497+{
18498+ sort(start, finish - start, sizeof(struct exception_table_entry),
18499+ cmp_ex, swap_ex);
18500+}
18501+
18502+#ifdef CONFIG_MODULES
18503+/*
18504+ * If the exception table is sorted, any referring to the module init
18505+ * will be at the beginning or the end.
18506+ */
18507+void trim_init_extable(struct module *m)
18508+{
18509+ /*trim the beginning*/
18510+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18511+ m->extable++;
18512+ m->num_exentries--;
18513+ }
18514+ /*trim the end*/
18515+ while (m->num_exentries &&
18516+ within_module_init(m->extable[m->num_exentries-1].insn, m))
18517+ m->num_exentries--;
18518+}
18519+#endif /* CONFIG_MODULES */
18520
18521 int fixup_exception(struct pt_regs *regs)
18522 {
18523 const struct exception_table_entry *fixup;
18524
18525 #ifdef CONFIG_PNPBIOS
18526- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18527+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18528 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18529 extern u32 pnp_bios_is_utter_crap;
18530 pnp_bios_is_utter_crap = 1;
18531diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18532--- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18533+++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-05 19:44:35.000000000 -0400
18534@@ -12,10 +12,18 @@
18535 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18536 #include <linux/perf_event.h> /* perf_sw_event */
18537 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18538+#include <linux/unistd.h>
18539+#include <linux/compiler.h>
18540
18541 #include <asm/traps.h> /* dotraplinkage, ... */
18542 #include <asm/pgalloc.h> /* pgd_*(), ... */
18543 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18544+#include <asm/vsyscall.h>
18545+#include <asm/tlbflush.h>
18546+
18547+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18548+#include <asm/stacktrace.h>
18549+#endif
18550
18551 /*
18552 * Page fault error code bits:
18553@@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18554 int ret = 0;
18555
18556 /* kprobe_running() needs smp_processor_id() */
18557- if (kprobes_built_in() && !user_mode_vm(regs)) {
18558+ if (kprobes_built_in() && !user_mode(regs)) {
18559 preempt_disable();
18560 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18561 ret = 1;
18562@@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18563 return !instr_lo || (instr_lo>>1) == 1;
18564 case 0x00:
18565 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18566- if (probe_kernel_address(instr, opcode))
18567+ if (user_mode(regs)) {
18568+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18569+ return 0;
18570+ } else if (probe_kernel_address(instr, opcode))
18571 return 0;
18572
18573 *prefetch = (instr_lo == 0xF) &&
18574@@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18575 while (instr < max_instr) {
18576 unsigned char opcode;
18577
18578- if (probe_kernel_address(instr, opcode))
18579+ if (user_mode(regs)) {
18580+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18581+ break;
18582+ } else if (probe_kernel_address(instr, opcode))
18583 break;
18584
18585 instr++;
18586@@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18587 force_sig_info(si_signo, &info, tsk);
18588 }
18589
18590+#ifdef CONFIG_PAX_EMUTRAMP
18591+static int pax_handle_fetch_fault(struct pt_regs *regs);
18592+#endif
18593+
18594+#ifdef CONFIG_PAX_PAGEEXEC
18595+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18596+{
18597+ pgd_t *pgd;
18598+ pud_t *pud;
18599+ pmd_t *pmd;
18600+
18601+ pgd = pgd_offset(mm, address);
18602+ if (!pgd_present(*pgd))
18603+ return NULL;
18604+ pud = pud_offset(pgd, address);
18605+ if (!pud_present(*pud))
18606+ return NULL;
18607+ pmd = pmd_offset(pud, address);
18608+ if (!pmd_present(*pmd))
18609+ return NULL;
18610+ return pmd;
18611+}
18612+#endif
18613+
18614 DEFINE_SPINLOCK(pgd_lock);
18615 LIST_HEAD(pgd_list);
18616
18617@@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18618 for (address = VMALLOC_START & PMD_MASK;
18619 address >= TASK_SIZE && address < FIXADDR_TOP;
18620 address += PMD_SIZE) {
18621+
18622+#ifdef CONFIG_PAX_PER_CPU_PGD
18623+ unsigned long cpu;
18624+#else
18625 struct page *page;
18626+#endif
18627
18628 spin_lock(&pgd_lock);
18629+
18630+#ifdef CONFIG_PAX_PER_CPU_PGD
18631+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18632+ pgd_t *pgd = get_cpu_pgd(cpu);
18633+ pmd_t *ret;
18634+#else
18635 list_for_each_entry(page, &pgd_list, lru) {
18636+ pgd_t *pgd = page_address(page);
18637 spinlock_t *pgt_lock;
18638 pmd_t *ret;
18639
18640@@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18641 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18642
18643 spin_lock(pgt_lock);
18644- ret = vmalloc_sync_one(page_address(page), address);
18645+#endif
18646+
18647+ ret = vmalloc_sync_one(pgd, address);
18648+
18649+#ifndef CONFIG_PAX_PER_CPU_PGD
18650 spin_unlock(pgt_lock);
18651+#endif
18652
18653 if (!ret)
18654 break;
18655@@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18656 * an interrupt in the middle of a task switch..
18657 */
18658 pgd_paddr = read_cr3();
18659+
18660+#ifdef CONFIG_PAX_PER_CPU_PGD
18661+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18662+#endif
18663+
18664 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18665 if (!pmd_k)
18666 return -1;
18667@@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18668 * happen within a race in page table update. In the later
18669 * case just flush:
18670 */
18671+
18672+#ifdef CONFIG_PAX_PER_CPU_PGD
18673+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18674+ pgd = pgd_offset_cpu(smp_processor_id(), address);
18675+#else
18676 pgd = pgd_offset(current->active_mm, address);
18677+#endif
18678+
18679 pgd_ref = pgd_offset_k(address);
18680 if (pgd_none(*pgd_ref))
18681 return -1;
18682@@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18683 static int is_errata100(struct pt_regs *regs, unsigned long address)
18684 {
18685 #ifdef CONFIG_X86_64
18686- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18687+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18688 return 1;
18689 #endif
18690 return 0;
18691@@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18692 }
18693
18694 static const char nx_warning[] = KERN_CRIT
18695-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18696+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18697
18698 static void
18699 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18700@@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18701 if (!oops_may_print())
18702 return;
18703
18704- if (error_code & PF_INSTR) {
18705+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18706 unsigned int level;
18707
18708 pte_t *pte = lookup_address(address, &level);
18709
18710 if (pte && pte_present(*pte) && !pte_exec(*pte))
18711- printk(nx_warning, current_uid());
18712+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18713 }
18714
18715+#ifdef CONFIG_PAX_KERNEXEC
18716+ if (init_mm.start_code <= address && address < init_mm.end_code) {
18717+ if (current->signal->curr_ip)
18718+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18719+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18720+ else
18721+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18722+ current->comm, task_pid_nr(current), current_uid(), current_euid());
18723+ }
18724+#endif
18725+
18726 printk(KERN_ALERT "BUG: unable to handle kernel ");
18727 if (address < PAGE_SIZE)
18728 printk(KERN_CONT "NULL pointer dereference");
18729@@ -701,6 +779,68 @@ __bad_area_nosemaphore(struct pt_regs *r
18730 unsigned long address, int si_code)
18731 {
18732 struct task_struct *tsk = current;
18733+ struct mm_struct *mm = tsk->mm;
18734+
18735+#ifdef CONFIG_X86_64
18736+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18737+ if (regs->ip == (unsigned long)vgettimeofday) {
18738+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18739+ return;
18740+ } else if (regs->ip == (unsigned long)vtime) {
18741+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18742+ return;
18743+ } else if (regs->ip == (unsigned long)vgetcpu) {
18744+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18745+ return;
18746+ }
18747+ }
18748+#endif
18749+
18750+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18751+ if (mm && (error_code & PF_USER)) {
18752+ unsigned long ip = regs->ip;
18753+
18754+ if (v8086_mode(regs))
18755+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18756+
18757+ /*
18758+ * It's possible to have interrupts off here:
18759+ */
18760+ local_irq_enable();
18761+
18762+#ifdef CONFIG_PAX_PAGEEXEC
18763+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18764+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18765+
18766+#ifdef CONFIG_PAX_EMUTRAMP
18767+ switch (pax_handle_fetch_fault(regs)) {
18768+ case 2:
18769+ return;
18770+ }
18771+#endif
18772+
18773+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18774+ do_group_exit(SIGKILL);
18775+ }
18776+#endif
18777+
18778+#ifdef CONFIG_PAX_SEGMEXEC
18779+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18780+
18781+#ifdef CONFIG_PAX_EMUTRAMP
18782+ switch (pax_handle_fetch_fault(regs)) {
18783+ case 2:
18784+ return;
18785+ }
18786+#endif
18787+
18788+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18789+ do_group_exit(SIGKILL);
18790+ }
18791+#endif
18792+
18793+ }
18794+#endif
18795
18796 /* User mode accesses just cause a SIGSEGV */
18797 if (error_code & PF_USER) {
18798@@ -855,6 +995,99 @@ static int spurious_fault_check(unsigned
18799 return 1;
18800 }
18801
18802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18803+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18804+{
18805+ pte_t *pte;
18806+ pmd_t *pmd;
18807+ spinlock_t *ptl;
18808+ unsigned char pte_mask;
18809+
18810+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18811+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
18812+ return 0;
18813+
18814+ /* PaX: it's our fault, let's handle it if we can */
18815+
18816+ /* PaX: take a look at read faults before acquiring any locks */
18817+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18818+ /* instruction fetch attempt from a protected page in user mode */
18819+ up_read(&mm->mmap_sem);
18820+
18821+#ifdef CONFIG_PAX_EMUTRAMP
18822+ switch (pax_handle_fetch_fault(regs)) {
18823+ case 2:
18824+ return 1;
18825+ }
18826+#endif
18827+
18828+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18829+ do_group_exit(SIGKILL);
18830+ }
18831+
18832+ pmd = pax_get_pmd(mm, address);
18833+ if (unlikely(!pmd))
18834+ return 0;
18835+
18836+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18837+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18838+ pte_unmap_unlock(pte, ptl);
18839+ return 0;
18840+ }
18841+
18842+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18843+ /* write attempt to a protected page in user mode */
18844+ pte_unmap_unlock(pte, ptl);
18845+ return 0;
18846+ }
18847+
18848+#ifdef CONFIG_SMP
18849+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18850+#else
18851+ if (likely(address > get_limit(regs->cs)))
18852+#endif
18853+ {
18854+ set_pte(pte, pte_mkread(*pte));
18855+ __flush_tlb_one(address);
18856+ pte_unmap_unlock(pte, ptl);
18857+ up_read(&mm->mmap_sem);
18858+ return 1;
18859+ }
18860+
18861+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18862+
18863+ /*
18864+ * PaX: fill DTLB with user rights and retry
18865+ */
18866+ __asm__ __volatile__ (
18867+ "orb %2,(%1)\n"
18868+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18869+/*
18870+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18871+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18872+ * page fault when examined during a TLB load attempt. this is true not only
18873+ * for PTEs holding a non-present entry but also present entries that will
18874+ * raise a page fault (such as those set up by PaX, or the copy-on-write
18875+ * mechanism). in effect it means that we do *not* need to flush the TLBs
18876+ * for our target pages since their PTEs are simply not in the TLBs at all.
18877+
18878+ * the best thing in omitting it is that we gain around 15-20% speed in the
18879+ * fast path of the page fault handler and can get rid of tracing since we
18880+ * can no longer flush unintended entries.
18881+ */
18882+ "invlpg (%0)\n"
18883+#endif
18884+ __copyuser_seg"testb $0,(%0)\n"
18885+ "xorb %3,(%1)\n"
18886+ :
18887+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18888+ : "memory", "cc");
18889+ pte_unmap_unlock(pte, ptl);
18890+ up_read(&mm->mmap_sem);
18891+ return 1;
18892+}
18893+#endif
18894+
18895 /*
18896 * Handle a spurious fault caused by a stale TLB entry.
18897 *
18898@@ -927,6 +1160,9 @@ int show_unhandled_signals = 1;
18899 static inline int
18900 access_error(unsigned long error_code, struct vm_area_struct *vma)
18901 {
18902+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18903+ return 1;
18904+
18905 if (error_code & PF_WRITE) {
18906 /* write, present and write, not present: */
18907 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18908@@ -960,19 +1196,33 @@ do_page_fault(struct pt_regs *regs, unsi
18909 {
18910 struct vm_area_struct *vma;
18911 struct task_struct *tsk;
18912- unsigned long address;
18913 struct mm_struct *mm;
18914 int fault;
18915 int write = error_code & PF_WRITE;
18916 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18917 (write ? FAULT_FLAG_WRITE : 0);
18918
18919+ /* Get the faulting address: */
18920+ unsigned long address = read_cr2();
18921+
18922+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18923+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18924+ if (!search_exception_tables(regs->ip)) {
18925+ bad_area_nosemaphore(regs, error_code, address);
18926+ return;
18927+ }
18928+ if (address < PAX_USER_SHADOW_BASE) {
18929+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18930+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18931+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18932+ } else
18933+ address -= PAX_USER_SHADOW_BASE;
18934+ }
18935+#endif
18936+
18937 tsk = current;
18938 mm = tsk->mm;
18939
18940- /* Get the faulting address: */
18941- address = read_cr2();
18942-
18943 /*
18944 * Detect and handle instructions that would cause a page fault for
18945 * both a tracked kernel page and a userspace page.
18946@@ -1032,7 +1282,7 @@ do_page_fault(struct pt_regs *regs, unsi
18947 * User-mode registers count as a user access even for any
18948 * potential system fault or CPU buglet:
18949 */
18950- if (user_mode_vm(regs)) {
18951+ if (user_mode(regs)) {
18952 local_irq_enable();
18953 error_code |= PF_USER;
18954 } else {
18955@@ -1087,6 +1337,11 @@ retry:
18956 might_sleep();
18957 }
18958
18959+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18960+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18961+ return;
18962+#endif
18963+
18964 vma = find_vma(mm, address);
18965 if (unlikely(!vma)) {
18966 bad_area(regs, error_code, address);
18967@@ -1098,18 +1353,24 @@ retry:
18968 bad_area(regs, error_code, address);
18969 return;
18970 }
18971- if (error_code & PF_USER) {
18972- /*
18973- * Accessing the stack below %sp is always a bug.
18974- * The large cushion allows instructions like enter
18975- * and pusha to work. ("enter $65535, $31" pushes
18976- * 32 pointers and then decrements %sp by 65535.)
18977- */
18978- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18979- bad_area(regs, error_code, address);
18980- return;
18981- }
18982+ /*
18983+ * Accessing the stack below %sp is always a bug.
18984+ * The large cushion allows instructions like enter
18985+ * and pusha to work. ("enter $65535, $31" pushes
18986+ * 32 pointers and then decrements %sp by 65535.)
18987+ */
18988+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18989+ bad_area(regs, error_code, address);
18990+ return;
18991 }
18992+
18993+#ifdef CONFIG_PAX_SEGMEXEC
18994+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18995+ bad_area(regs, error_code, address);
18996+ return;
18997+ }
18998+#endif
18999+
19000 if (unlikely(expand_stack(vma, address))) {
19001 bad_area(regs, error_code, address);
19002 return;
19003@@ -1164,3 +1425,199 @@ good_area:
19004
19005 up_read(&mm->mmap_sem);
19006 }
19007+
19008+#ifdef CONFIG_PAX_EMUTRAMP
19009+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19010+{
19011+ int err;
19012+
19013+ do { /* PaX: gcc trampoline emulation #1 */
19014+ unsigned char mov1, mov2;
19015+ unsigned short jmp;
19016+ unsigned int addr1, addr2;
19017+
19018+#ifdef CONFIG_X86_64
19019+ if ((regs->ip + 11) >> 32)
19020+ break;
19021+#endif
19022+
19023+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19024+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19025+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19026+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19027+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19028+
19029+ if (err)
19030+ break;
19031+
19032+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19033+ regs->cx = addr1;
19034+ regs->ax = addr2;
19035+ regs->ip = addr2;
19036+ return 2;
19037+ }
19038+ } while (0);
19039+
19040+ do { /* PaX: gcc trampoline emulation #2 */
19041+ unsigned char mov, jmp;
19042+ unsigned int addr1, addr2;
19043+
19044+#ifdef CONFIG_X86_64
19045+ if ((regs->ip + 9) >> 32)
19046+ break;
19047+#endif
19048+
19049+ err = get_user(mov, (unsigned char __user *)regs->ip);
19050+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19051+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19052+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19053+
19054+ if (err)
19055+ break;
19056+
19057+ if (mov == 0xB9 && jmp == 0xE9) {
19058+ regs->cx = addr1;
19059+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19060+ return 2;
19061+ }
19062+ } while (0);
19063+
19064+ return 1; /* PaX in action */
19065+}
19066+
19067+#ifdef CONFIG_X86_64
19068+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19069+{
19070+ int err;
19071+
19072+ do { /* PaX: gcc trampoline emulation #1 */
19073+ unsigned short mov1, mov2, jmp1;
19074+ unsigned char jmp2;
19075+ unsigned int addr1;
19076+ unsigned long addr2;
19077+
19078+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19079+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19080+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19081+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19082+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19083+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19084+
19085+ if (err)
19086+ break;
19087+
19088+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19089+ regs->r11 = addr1;
19090+ regs->r10 = addr2;
19091+ regs->ip = addr1;
19092+ return 2;
19093+ }
19094+ } while (0);
19095+
19096+ do { /* PaX: gcc trampoline emulation #2 */
19097+ unsigned short mov1, mov2, jmp1;
19098+ unsigned char jmp2;
19099+ unsigned long addr1, addr2;
19100+
19101+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19102+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19103+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19104+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19105+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19106+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19107+
19108+ if (err)
19109+ break;
19110+
19111+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19112+ regs->r11 = addr1;
19113+ regs->r10 = addr2;
19114+ regs->ip = addr1;
19115+ return 2;
19116+ }
19117+ } while (0);
19118+
19119+ return 1; /* PaX in action */
19120+}
19121+#endif
19122+
19123+/*
19124+ * PaX: decide what to do with offenders (regs->ip = fault address)
19125+ *
19126+ * returns 1 when task should be killed
19127+ * 2 when gcc trampoline was detected
19128+ */
19129+static int pax_handle_fetch_fault(struct pt_regs *regs)
19130+{
19131+ if (v8086_mode(regs))
19132+ return 1;
19133+
19134+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19135+ return 1;
19136+
19137+#ifdef CONFIG_X86_32
19138+ return pax_handle_fetch_fault_32(regs);
19139+#else
19140+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19141+ return pax_handle_fetch_fault_32(regs);
19142+ else
19143+ return pax_handle_fetch_fault_64(regs);
19144+#endif
19145+}
19146+#endif
19147+
19148+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19149+void pax_report_insns(void *pc, void *sp)
19150+{
19151+ long i;
19152+
19153+ printk(KERN_ERR "PAX: bytes at PC: ");
19154+ for (i = 0; i < 20; i++) {
19155+ unsigned char c;
19156+ if (get_user(c, (__force unsigned char __user *)pc+i))
19157+ printk(KERN_CONT "?? ");
19158+ else
19159+ printk(KERN_CONT "%02x ", c);
19160+ }
19161+ printk("\n");
19162+
19163+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19164+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19165+ unsigned long c;
19166+ if (get_user(c, (__force unsigned long __user *)sp+i))
19167+#ifdef CONFIG_X86_32
19168+ printk(KERN_CONT "???????? ");
19169+#else
19170+ printk(KERN_CONT "???????????????? ");
19171+#endif
19172+ else
19173+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19174+ }
19175+ printk("\n");
19176+}
19177+#endif
19178+
19179+/**
19180+ * probe_kernel_write(): safely attempt to write to a location
19181+ * @dst: address to write to
19182+ * @src: pointer to the data that shall be written
19183+ * @size: size of the data chunk
19184+ *
19185+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19186+ * happens, handle that and return -EFAULT.
19187+ */
19188+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19189+{
19190+ long ret;
19191+ mm_segment_t old_fs = get_fs();
19192+
19193+ set_fs(KERNEL_DS);
19194+ pagefault_disable();
19195+ pax_open_kernel();
19196+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19197+ pax_close_kernel();
19198+ pagefault_enable();
19199+ set_fs(old_fs);
19200+
19201+ return ret ? -EFAULT : 0;
19202+}
19203diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19204--- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19205+++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19206@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19207 addr = start;
19208 len = (unsigned long) nr_pages << PAGE_SHIFT;
19209 end = start + len;
19210- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19211+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19212 (void __user *)start, len)))
19213 return 0;
19214
19215diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19216--- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19217+++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19218@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19219 idx = type + KM_TYPE_NR*smp_processor_id();
19220 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19221 BUG_ON(!pte_none(*(kmap_pte-idx)));
19222+
19223+ pax_open_kernel();
19224 set_pte(kmap_pte-idx, mk_pte(page, prot));
19225+ pax_close_kernel();
19226
19227 return (void *)vaddr;
19228 }
19229diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19230--- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19231+++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19232@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19233 struct hstate *h = hstate_file(file);
19234 struct mm_struct *mm = current->mm;
19235 struct vm_area_struct *vma;
19236- unsigned long start_addr;
19237+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19238+
19239+#ifdef CONFIG_PAX_SEGMEXEC
19240+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19241+ pax_task_size = SEGMEXEC_TASK_SIZE;
19242+#endif
19243+
19244+ pax_task_size -= PAGE_SIZE;
19245
19246 if (len > mm->cached_hole_size) {
19247- start_addr = mm->free_area_cache;
19248+ start_addr = mm->free_area_cache;
19249 } else {
19250- start_addr = TASK_UNMAPPED_BASE;
19251- mm->cached_hole_size = 0;
19252+ start_addr = mm->mmap_base;
19253+ mm->cached_hole_size = 0;
19254 }
19255
19256 full_search:
19257@@ -280,26 +287,27 @@ full_search:
19258
19259 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19260 /* At this point: (!vma || addr < vma->vm_end). */
19261- if (TASK_SIZE - len < addr) {
19262+ if (pax_task_size - len < addr) {
19263 /*
19264 * Start a new search - just in case we missed
19265 * some holes.
19266 */
19267- if (start_addr != TASK_UNMAPPED_BASE) {
19268- start_addr = TASK_UNMAPPED_BASE;
19269+ if (start_addr != mm->mmap_base) {
19270+ start_addr = mm->mmap_base;
19271 mm->cached_hole_size = 0;
19272 goto full_search;
19273 }
19274 return -ENOMEM;
19275 }
19276- if (!vma || addr + len <= vma->vm_start) {
19277- mm->free_area_cache = addr + len;
19278- return addr;
19279- }
19280+ if (check_heap_stack_gap(vma, addr, len))
19281+ break;
19282 if (addr + mm->cached_hole_size < vma->vm_start)
19283 mm->cached_hole_size = vma->vm_start - addr;
19284 addr = ALIGN(vma->vm_end, huge_page_size(h));
19285 }
19286+
19287+ mm->free_area_cache = addr + len;
19288+ return addr;
19289 }
19290
19291 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19292@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19293 {
19294 struct hstate *h = hstate_file(file);
19295 struct mm_struct *mm = current->mm;
19296- struct vm_area_struct *vma, *prev_vma;
19297- unsigned long base = mm->mmap_base, addr = addr0;
19298+ struct vm_area_struct *vma;
19299+ unsigned long base = mm->mmap_base, addr;
19300 unsigned long largest_hole = mm->cached_hole_size;
19301- int first_time = 1;
19302
19303 /* don't allow allocations above current base */
19304 if (mm->free_area_cache > base)
19305@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19306 largest_hole = 0;
19307 mm->free_area_cache = base;
19308 }
19309-try_again:
19310+
19311 /* make sure it can fit in the remaining address space */
19312 if (mm->free_area_cache < len)
19313 goto fail;
19314
19315 /* either no address requested or can't fit in requested address hole */
19316- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19317+ addr = (mm->free_area_cache - len);
19318 do {
19319+ addr &= huge_page_mask(h);
19320+ vma = find_vma(mm, addr);
19321 /*
19322 * Lookup failure means no vma is above this address,
19323 * i.e. return with success:
19324- */
19325- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19326- return addr;
19327-
19328- /*
19329 * new region fits between prev_vma->vm_end and
19330 * vma->vm_start, use it:
19331 */
19332- if (addr + len <= vma->vm_start &&
19333- (!prev_vma || (addr >= prev_vma->vm_end))) {
19334+ if (check_heap_stack_gap(vma, addr, len)) {
19335 /* remember the address as a hint for next time */
19336- mm->cached_hole_size = largest_hole;
19337- return (mm->free_area_cache = addr);
19338- } else {
19339- /* pull free_area_cache down to the first hole */
19340- if (mm->free_area_cache == vma->vm_end) {
19341- mm->free_area_cache = vma->vm_start;
19342- mm->cached_hole_size = largest_hole;
19343- }
19344+ mm->cached_hole_size = largest_hole;
19345+ return (mm->free_area_cache = addr);
19346+ }
19347+ /* pull free_area_cache down to the first hole */
19348+ if (mm->free_area_cache == vma->vm_end) {
19349+ mm->free_area_cache = vma->vm_start;
19350+ mm->cached_hole_size = largest_hole;
19351 }
19352
19353 /* remember the largest hole we saw so far */
19354 if (addr + largest_hole < vma->vm_start)
19355- largest_hole = vma->vm_start - addr;
19356+ largest_hole = vma->vm_start - addr;
19357
19358 /* try just below the current vma->vm_start */
19359- addr = (vma->vm_start - len) & huge_page_mask(h);
19360- } while (len <= vma->vm_start);
19361+ addr = skip_heap_stack_gap(vma, len);
19362+ } while (!IS_ERR_VALUE(addr));
19363
19364 fail:
19365 /*
19366- * if hint left us with no space for the requested
19367- * mapping then try again:
19368- */
19369- if (first_time) {
19370- mm->free_area_cache = base;
19371- largest_hole = 0;
19372- first_time = 0;
19373- goto try_again;
19374- }
19375- /*
19376 * A failed mmap() very likely causes application failure,
19377 * so fall back to the bottom-up function here. This scenario
19378 * can happen with large stack limits and large mmap()
19379 * allocations.
19380 */
19381- mm->free_area_cache = TASK_UNMAPPED_BASE;
19382+
19383+#ifdef CONFIG_PAX_SEGMEXEC
19384+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19385+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19386+ else
19387+#endif
19388+
19389+ mm->mmap_base = TASK_UNMAPPED_BASE;
19390+
19391+#ifdef CONFIG_PAX_RANDMMAP
19392+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19393+ mm->mmap_base += mm->delta_mmap;
19394+#endif
19395+
19396+ mm->free_area_cache = mm->mmap_base;
19397 mm->cached_hole_size = ~0UL;
19398 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19399 len, pgoff, flags);
19400@@ -386,6 +392,7 @@ fail:
19401 /*
19402 * Restore the topdown base:
19403 */
19404+ mm->mmap_base = base;
19405 mm->free_area_cache = base;
19406 mm->cached_hole_size = ~0UL;
19407
19408@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19409 struct hstate *h = hstate_file(file);
19410 struct mm_struct *mm = current->mm;
19411 struct vm_area_struct *vma;
19412+ unsigned long pax_task_size = TASK_SIZE;
19413
19414 if (len & ~huge_page_mask(h))
19415 return -EINVAL;
19416- if (len > TASK_SIZE)
19417+
19418+#ifdef CONFIG_PAX_SEGMEXEC
19419+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19420+ pax_task_size = SEGMEXEC_TASK_SIZE;
19421+#endif
19422+
19423+ pax_task_size -= PAGE_SIZE;
19424+
19425+ if (len > pax_task_size)
19426 return -ENOMEM;
19427
19428 if (flags & MAP_FIXED) {
19429@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19430 if (addr) {
19431 addr = ALIGN(addr, huge_page_size(h));
19432 vma = find_vma(mm, addr);
19433- if (TASK_SIZE - len >= addr &&
19434- (!vma || addr + len <= vma->vm_start))
19435+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19436 return addr;
19437 }
19438 if (mm->get_unmapped_area == arch_get_unmapped_area)
19439diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19440--- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19441+++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19442@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19443 }
19444
19445 /*
19446- * Creates a middle page table and puts a pointer to it in the
19447- * given global directory entry. This only returns the gd entry
19448- * in non-PAE compilation mode, since the middle layer is folded.
19449- */
19450-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19451-{
19452- pud_t *pud;
19453- pmd_t *pmd_table;
19454-
19455-#ifdef CONFIG_X86_PAE
19456- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19457- if (after_bootmem)
19458- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19459- else
19460- pmd_table = (pmd_t *)alloc_low_page();
19461- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19462- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19463- pud = pud_offset(pgd, 0);
19464- BUG_ON(pmd_table != pmd_offset(pud, 0));
19465-
19466- return pmd_table;
19467- }
19468-#endif
19469- pud = pud_offset(pgd, 0);
19470- pmd_table = pmd_offset(pud, 0);
19471-
19472- return pmd_table;
19473-}
19474-
19475-/*
19476 * Create a page table and place a pointer to it in a middle page
19477 * directory entry:
19478 */
19479@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19480 page_table = (pte_t *)alloc_low_page();
19481
19482 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19483+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19484+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19485+#else
19486 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19487+#endif
19488 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19489 }
19490
19491 return pte_offset_kernel(pmd, 0);
19492 }
19493
19494+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19495+{
19496+ pud_t *pud;
19497+ pmd_t *pmd_table;
19498+
19499+ pud = pud_offset(pgd, 0);
19500+ pmd_table = pmd_offset(pud, 0);
19501+
19502+ return pmd_table;
19503+}
19504+
19505 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19506 {
19507 int pgd_idx = pgd_index(vaddr);
19508@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19509 int pgd_idx, pmd_idx;
19510 unsigned long vaddr;
19511 pgd_t *pgd;
19512+ pud_t *pud;
19513 pmd_t *pmd;
19514 pte_t *pte = NULL;
19515
19516@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19517 pgd = pgd_base + pgd_idx;
19518
19519 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19520- pmd = one_md_table_init(pgd);
19521- pmd = pmd + pmd_index(vaddr);
19522+ pud = pud_offset(pgd, vaddr);
19523+ pmd = pmd_offset(pud, vaddr);
19524+
19525+#ifdef CONFIG_X86_PAE
19526+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19527+#endif
19528+
19529 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19530 pmd++, pmd_idx++) {
19531 pte = page_table_kmap_check(one_page_table_init(pmd),
19532@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19533 }
19534 }
19535
19536-static inline int is_kernel_text(unsigned long addr)
19537+static inline int is_kernel_text(unsigned long start, unsigned long end)
19538 {
19539- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19540- return 1;
19541- return 0;
19542+ if ((start > ktla_ktva((unsigned long)_etext) ||
19543+ end <= ktla_ktva((unsigned long)_stext)) &&
19544+ (start > ktla_ktva((unsigned long)_einittext) ||
19545+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19546+
19547+#ifdef CONFIG_ACPI_SLEEP
19548+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19549+#endif
19550+
19551+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19552+ return 0;
19553+ return 1;
19554 }
19555
19556 /*
19557@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19558 unsigned long last_map_addr = end;
19559 unsigned long start_pfn, end_pfn;
19560 pgd_t *pgd_base = swapper_pg_dir;
19561- int pgd_idx, pmd_idx, pte_ofs;
19562+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19563 unsigned long pfn;
19564 pgd_t *pgd;
19565+ pud_t *pud;
19566 pmd_t *pmd;
19567 pte_t *pte;
19568 unsigned pages_2m, pages_4k;
19569@@ -281,8 +282,13 @@ repeat:
19570 pfn = start_pfn;
19571 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19572 pgd = pgd_base + pgd_idx;
19573- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19574- pmd = one_md_table_init(pgd);
19575+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19576+ pud = pud_offset(pgd, 0);
19577+ pmd = pmd_offset(pud, 0);
19578+
19579+#ifdef CONFIG_X86_PAE
19580+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19581+#endif
19582
19583 if (pfn >= end_pfn)
19584 continue;
19585@@ -294,14 +300,13 @@ repeat:
19586 #endif
19587 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19588 pmd++, pmd_idx++) {
19589- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19590+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19591
19592 /*
19593 * Map with big pages if possible, otherwise
19594 * create normal page tables:
19595 */
19596 if (use_pse) {
19597- unsigned int addr2;
19598 pgprot_t prot = PAGE_KERNEL_LARGE;
19599 /*
19600 * first pass will use the same initial
19601@@ -311,11 +316,7 @@ repeat:
19602 __pgprot(PTE_IDENT_ATTR |
19603 _PAGE_PSE);
19604
19605- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19606- PAGE_OFFSET + PAGE_SIZE-1;
19607-
19608- if (is_kernel_text(addr) ||
19609- is_kernel_text(addr2))
19610+ if (is_kernel_text(address, address + PMD_SIZE))
19611 prot = PAGE_KERNEL_LARGE_EXEC;
19612
19613 pages_2m++;
19614@@ -332,7 +333,7 @@ repeat:
19615 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19616 pte += pte_ofs;
19617 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19618- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19619+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19620 pgprot_t prot = PAGE_KERNEL;
19621 /*
19622 * first pass will use the same initial
19623@@ -340,7 +341,7 @@ repeat:
19624 */
19625 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19626
19627- if (is_kernel_text(addr))
19628+ if (is_kernel_text(address, address + PAGE_SIZE))
19629 prot = PAGE_KERNEL_EXEC;
19630
19631 pages_4k++;
19632@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19633
19634 pud = pud_offset(pgd, va);
19635 pmd = pmd_offset(pud, va);
19636- if (!pmd_present(*pmd))
19637+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19638 break;
19639
19640 pte = pte_offset_kernel(pmd, va);
19641@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19642
19643 static void __init pagetable_init(void)
19644 {
19645- pgd_t *pgd_base = swapper_pg_dir;
19646-
19647- permanent_kmaps_init(pgd_base);
19648+ permanent_kmaps_init(swapper_pg_dir);
19649 }
19650
19651-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19652+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19653 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19654
19655 /* user-defined highmem size */
19656@@ -754,6 +753,12 @@ void __init mem_init(void)
19657
19658 pci_iommu_alloc();
19659
19660+#ifdef CONFIG_PAX_PER_CPU_PGD
19661+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19662+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19663+ KERNEL_PGD_PTRS);
19664+#endif
19665+
19666 #ifdef CONFIG_FLATMEM
19667 BUG_ON(!mem_map);
19668 #endif
19669@@ -771,7 +776,7 @@ void __init mem_init(void)
19670 set_highmem_pages_init();
19671
19672 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19673- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19674+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19675 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19676
19677 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19678@@ -812,10 +817,10 @@ void __init mem_init(void)
19679 ((unsigned long)&__init_end -
19680 (unsigned long)&__init_begin) >> 10,
19681
19682- (unsigned long)&_etext, (unsigned long)&_edata,
19683- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19684+ (unsigned long)&_sdata, (unsigned long)&_edata,
19685+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19686
19687- (unsigned long)&_text, (unsigned long)&_etext,
19688+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19689 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19690
19691 /*
19692@@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19693 if (!kernel_set_to_readonly)
19694 return;
19695
19696+ start = ktla_ktva(start);
19697 pr_debug("Set kernel text: %lx - %lx for read write\n",
19698 start, start+size);
19699
19700@@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19701 if (!kernel_set_to_readonly)
19702 return;
19703
19704+ start = ktla_ktva(start);
19705 pr_debug("Set kernel text: %lx - %lx for read only\n",
19706 start, start+size);
19707
19708@@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19709 unsigned long start = PFN_ALIGN(_text);
19710 unsigned long size = PFN_ALIGN(_etext) - start;
19711
19712+ start = ktla_ktva(start);
19713 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19714 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19715 size >> 10);
19716diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19717--- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19718+++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19719@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19720 * around without checking the pgd every time.
19721 */
19722
19723-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19724+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19725 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19726
19727 int force_personality32;
19728@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19729
19730 for (address = start; address <= end; address += PGDIR_SIZE) {
19731 const pgd_t *pgd_ref = pgd_offset_k(address);
19732+
19733+#ifdef CONFIG_PAX_PER_CPU_PGD
19734+ unsigned long cpu;
19735+#else
19736 struct page *page;
19737+#endif
19738
19739 if (pgd_none(*pgd_ref))
19740 continue;
19741
19742 spin_lock(&pgd_lock);
19743+
19744+#ifdef CONFIG_PAX_PER_CPU_PGD
19745+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19746+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19747+#else
19748 list_for_each_entry(page, &pgd_list, lru) {
19749 pgd_t *pgd;
19750 spinlock_t *pgt_lock;
19751@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19752 /* the pgt_lock only for Xen */
19753 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19754 spin_lock(pgt_lock);
19755+#endif
19756
19757 if (pgd_none(*pgd))
19758 set_pgd(pgd, *pgd_ref);
19759@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19760 BUG_ON(pgd_page_vaddr(*pgd)
19761 != pgd_page_vaddr(*pgd_ref));
19762
19763+#ifndef CONFIG_PAX_PER_CPU_PGD
19764 spin_unlock(pgt_lock);
19765+#endif
19766+
19767 }
19768 spin_unlock(&pgd_lock);
19769 }
19770@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19771 pmd = fill_pmd(pud, vaddr);
19772 pte = fill_pte(pmd, vaddr);
19773
19774+ pax_open_kernel();
19775 set_pte(pte, new_pte);
19776+ pax_close_kernel();
19777
19778 /*
19779 * It's enough to flush this one mapping.
19780@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19781 pgd = pgd_offset_k((unsigned long)__va(phys));
19782 if (pgd_none(*pgd)) {
19783 pud = (pud_t *) spp_getpage();
19784- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19785- _PAGE_USER));
19786+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19787 }
19788 pud = pud_offset(pgd, (unsigned long)__va(phys));
19789 if (pud_none(*pud)) {
19790 pmd = (pmd_t *) spp_getpage();
19791- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19792- _PAGE_USER));
19793+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19794 }
19795 pmd = pmd_offset(pud, phys);
19796 BUG_ON(!pmd_none(*pmd));
19797@@ -698,6 +712,12 @@ void __init mem_init(void)
19798
19799 pci_iommu_alloc();
19800
19801+#ifdef CONFIG_PAX_PER_CPU_PGD
19802+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19803+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19804+ KERNEL_PGD_PTRS);
19805+#endif
19806+
19807 /* clear_bss() already clear the empty_zero_page */
19808
19809 reservedpages = 0;
19810@@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19811 static struct vm_area_struct gate_vma = {
19812 .vm_start = VSYSCALL_START,
19813 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19814- .vm_page_prot = PAGE_READONLY_EXEC,
19815- .vm_flags = VM_READ | VM_EXEC
19816+ .vm_page_prot = PAGE_READONLY,
19817+ .vm_flags = VM_READ
19818 };
19819
19820 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19821@@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19822
19823 const char *arch_vma_name(struct vm_area_struct *vma)
19824 {
19825- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19826+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19827 return "[vdso]";
19828 if (vma == &gate_vma)
19829 return "[vsyscall]";
19830diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19831--- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19832+++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19833@@ -33,7 +33,7 @@ int direct_gbpages
19834 static void __init find_early_table_space(unsigned long end, int use_pse,
19835 int use_gbpages)
19836 {
19837- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19838+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19839 phys_addr_t base;
19840
19841 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19842@@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19843 */
19844 int devmem_is_allowed(unsigned long pagenr)
19845 {
19846- if (pagenr <= 256)
19847+#ifdef CONFIG_GRKERNSEC_KMEM
19848+ /* allow BDA */
19849+ if (!pagenr)
19850+ return 1;
19851+ /* allow EBDA */
19852+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19853+ return 1;
19854+#else
19855+ if (!pagenr)
19856+ return 1;
19857+#ifdef CONFIG_VM86
19858+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19859+ return 1;
19860+#endif
19861+#endif
19862+
19863+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19864 return 1;
19865+#ifdef CONFIG_GRKERNSEC_KMEM
19866+ /* throw out everything else below 1MB */
19867+ if (pagenr <= 256)
19868+ return 0;
19869+#endif
19870 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19871 return 0;
19872 if (!page_is_ram(pagenr))
19873 return 1;
19874+
19875 return 0;
19876 }
19877
19878@@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19879
19880 void free_initmem(void)
19881 {
19882+
19883+#ifdef CONFIG_PAX_KERNEXEC
19884+#ifdef CONFIG_X86_32
19885+ /* PaX: limit KERNEL_CS to actual size */
19886+ unsigned long addr, limit;
19887+ struct desc_struct d;
19888+ int cpu;
19889+
19890+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19891+ limit = (limit - 1UL) >> PAGE_SHIFT;
19892+
19893+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19894+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
19895+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19896+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19897+ }
19898+
19899+ /* PaX: make KERNEL_CS read-only */
19900+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19901+ if (!paravirt_enabled())
19902+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19903+/*
19904+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19905+ pgd = pgd_offset_k(addr);
19906+ pud = pud_offset(pgd, addr);
19907+ pmd = pmd_offset(pud, addr);
19908+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19909+ }
19910+*/
19911+#ifdef CONFIG_X86_PAE
19912+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19913+/*
19914+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19915+ pgd = pgd_offset_k(addr);
19916+ pud = pud_offset(pgd, addr);
19917+ pmd = pmd_offset(pud, addr);
19918+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19919+ }
19920+*/
19921+#endif
19922+
19923+#ifdef CONFIG_MODULES
19924+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19925+#endif
19926+
19927+#else
19928+ pgd_t *pgd;
19929+ pud_t *pud;
19930+ pmd_t *pmd;
19931+ unsigned long addr, end;
19932+
19933+ /* PaX: make kernel code/rodata read-only, rest non-executable */
19934+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19935+ pgd = pgd_offset_k(addr);
19936+ pud = pud_offset(pgd, addr);
19937+ pmd = pmd_offset(pud, addr);
19938+ if (!pmd_present(*pmd))
19939+ continue;
19940+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19941+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19942+ else
19943+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19944+ }
19945+
19946+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19947+ end = addr + KERNEL_IMAGE_SIZE;
19948+ for (; addr < end; addr += PMD_SIZE) {
19949+ pgd = pgd_offset_k(addr);
19950+ pud = pud_offset(pgd, addr);
19951+ pmd = pmd_offset(pud, addr);
19952+ if (!pmd_present(*pmd))
19953+ continue;
19954+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19955+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19956+ }
19957+#endif
19958+
19959+ flush_tlb_all();
19960+#endif
19961+
19962 free_init_pages("unused kernel memory",
19963 (unsigned long)(&__init_begin),
19964 (unsigned long)(&__init_end));
19965diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
19966--- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
19967+++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
19968@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19969 type = kmap_atomic_idx_push();
19970 idx = type + KM_TYPE_NR * smp_processor_id();
19971 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19972+
19973+ pax_open_kernel();
19974 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19975+ pax_close_kernel();
19976+
19977 arch_flush_lazy_mmu_mode();
19978
19979 return (void *)vaddr;
19980diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
19981--- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
19982+++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
19983@@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
19984 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19985 int is_ram = page_is_ram(pfn);
19986
19987- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19988+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19989 return NULL;
19990 WARN_ON_ONCE(is_ram);
19991 }
19992@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19993 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19994
19995 static __initdata int after_paging_init;
19996-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19997+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19998
19999 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20000 {
20001@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20002 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20003
20004 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20005- memset(bm_pte, 0, sizeof(bm_pte));
20006- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20007+ pmd_populate_user(&init_mm, pmd, bm_pte);
20008
20009 /*
20010 * The boot-ioremap range spans multiple pmds, for which
20011diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20012--- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20013+++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20014@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20015 * memory (e.g. tracked pages)? For now, we need this to avoid
20016 * invoking kmemcheck for PnP BIOS calls.
20017 */
20018- if (regs->flags & X86_VM_MASK)
20019+ if (v8086_mode(regs))
20020 return false;
20021- if (regs->cs != __KERNEL_CS)
20022+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20023 return false;
20024
20025 pte = kmemcheck_pte_lookup(address);
20026diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20027--- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20028+++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20029@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20030 * Leave an at least ~128 MB hole with possible stack randomization.
20031 */
20032 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20033-#define MAX_GAP (TASK_SIZE/6*5)
20034+#define MAX_GAP (pax_task_size/6*5)
20035
20036 /*
20037 * True on X86_32 or when emulating IA32 on X86_64
20038@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20039 return rnd << PAGE_SHIFT;
20040 }
20041
20042-static unsigned long mmap_base(void)
20043+static unsigned long mmap_base(struct mm_struct *mm)
20044 {
20045 unsigned long gap = rlimit(RLIMIT_STACK);
20046+ unsigned long pax_task_size = TASK_SIZE;
20047+
20048+#ifdef CONFIG_PAX_SEGMEXEC
20049+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20050+ pax_task_size = SEGMEXEC_TASK_SIZE;
20051+#endif
20052
20053 if (gap < MIN_GAP)
20054 gap = MIN_GAP;
20055 else if (gap > MAX_GAP)
20056 gap = MAX_GAP;
20057
20058- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20059+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20060 }
20061
20062 /*
20063 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20064 * does, but not when emulating X86_32
20065 */
20066-static unsigned long mmap_legacy_base(void)
20067+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20068 {
20069- if (mmap_is_ia32())
20070+ if (mmap_is_ia32()) {
20071+
20072+#ifdef CONFIG_PAX_SEGMEXEC
20073+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20074+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20075+ else
20076+#endif
20077+
20078 return TASK_UNMAPPED_BASE;
20079- else
20080+ } else
20081 return TASK_UNMAPPED_BASE + mmap_rnd();
20082 }
20083
20084@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20085 void arch_pick_mmap_layout(struct mm_struct *mm)
20086 {
20087 if (mmap_is_legacy()) {
20088- mm->mmap_base = mmap_legacy_base();
20089+ mm->mmap_base = mmap_legacy_base(mm);
20090+
20091+#ifdef CONFIG_PAX_RANDMMAP
20092+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20093+ mm->mmap_base += mm->delta_mmap;
20094+#endif
20095+
20096 mm->get_unmapped_area = arch_get_unmapped_area;
20097 mm->unmap_area = arch_unmap_area;
20098 } else {
20099- mm->mmap_base = mmap_base();
20100+ mm->mmap_base = mmap_base(mm);
20101+
20102+#ifdef CONFIG_PAX_RANDMMAP
20103+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20104+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20105+#endif
20106+
20107 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20108 mm->unmap_area = arch_unmap_area_topdown;
20109 }
20110diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20111--- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20112+++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20113@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20114 break;
20115 default:
20116 {
20117- unsigned char *ip = (unsigned char *)instptr;
20118+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20119 my_trace->opcode = MMIO_UNKNOWN_OP;
20120 my_trace->width = 0;
20121 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20122@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20123 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20124 void __iomem *addr)
20125 {
20126- static atomic_t next_id;
20127+ static atomic_unchecked_t next_id;
20128 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20129 /* These are page-unaligned. */
20130 struct mmiotrace_map map = {
20131@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20132 .private = trace
20133 },
20134 .phys = offset,
20135- .id = atomic_inc_return(&next_id)
20136+ .id = atomic_inc_return_unchecked(&next_id)
20137 };
20138 map.map_id = trace->id;
20139
20140diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20141--- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20142+++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20143@@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20144 }
20145 #endif
20146
20147-extern unsigned long find_max_low_pfn(void);
20148 extern unsigned long highend_pfn, highstart_pfn;
20149
20150 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20151diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20152--- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20153+++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20154@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20155 */
20156 #ifdef CONFIG_PCI_BIOS
20157 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20158- pgprot_val(forbidden) |= _PAGE_NX;
20159+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20160 #endif
20161
20162 /*
20163@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20164 * Does not cover __inittext since that is gone later on. On
20165 * 64bit we do not enforce !NX on the low mapping
20166 */
20167- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20168- pgprot_val(forbidden) |= _PAGE_NX;
20169+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20170+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20171
20172+#ifdef CONFIG_DEBUG_RODATA
20173 /*
20174 * The .rodata section needs to be read-only. Using the pfn
20175 * catches all aliases.
20176@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20177 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20178 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20179 pgprot_val(forbidden) |= _PAGE_RW;
20180+#endif
20181
20182 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20183 /*
20184@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20185 }
20186 #endif
20187
20188+#ifdef CONFIG_PAX_KERNEXEC
20189+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20190+ pgprot_val(forbidden) |= _PAGE_RW;
20191+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20192+ }
20193+#endif
20194+
20195 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20196
20197 return prot;
20198@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20199 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20200 {
20201 /* change init_mm */
20202+ pax_open_kernel();
20203 set_pte_atomic(kpte, pte);
20204+
20205 #ifdef CONFIG_X86_32
20206 if (!SHARED_KERNEL_PMD) {
20207+
20208+#ifdef CONFIG_PAX_PER_CPU_PGD
20209+ unsigned long cpu;
20210+#else
20211 struct page *page;
20212+#endif
20213
20214+#ifdef CONFIG_PAX_PER_CPU_PGD
20215+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20216+ pgd_t *pgd = get_cpu_pgd(cpu);
20217+#else
20218 list_for_each_entry(page, &pgd_list, lru) {
20219- pgd_t *pgd;
20220+ pgd_t *pgd = (pgd_t *)page_address(page);
20221+#endif
20222+
20223 pud_t *pud;
20224 pmd_t *pmd;
20225
20226- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20227+ pgd += pgd_index(address);
20228 pud = pud_offset(pgd, address);
20229 pmd = pmd_offset(pud, address);
20230 set_pte_atomic((pte_t *)pmd, pte);
20231 }
20232 }
20233 #endif
20234+ pax_close_kernel();
20235 }
20236
20237 static int
20238diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20239--- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20240+++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20241@@ -36,7 +36,7 @@ enum {
20242
20243 static int pte_testbit(pte_t pte)
20244 {
20245- return pte_flags(pte) & _PAGE_UNUSED1;
20246+ return pte_flags(pte) & _PAGE_CPA_TEST;
20247 }
20248
20249 struct split_state {
20250diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20251--- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20252+++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20253@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20254
20255 if (!entry) {
20256 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20257- current->comm, current->pid, start, end);
20258+ current->comm, task_pid_nr(current), start, end);
20259 return -EINVAL;
20260 }
20261
20262@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20263 while (cursor < to) {
20264 if (!devmem_is_allowed(pfn)) {
20265 printk(KERN_INFO
20266- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20267- current->comm, from, to);
20268+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20269+ current->comm, from, to, cursor);
20270 return 0;
20271 }
20272 cursor += PAGE_SIZE;
20273@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20274 printk(KERN_INFO
20275 "%s:%d ioremap_change_attr failed %s "
20276 "for %Lx-%Lx\n",
20277- current->comm, current->pid,
20278+ current->comm, task_pid_nr(current),
20279 cattr_name(flags),
20280 base, (unsigned long long)(base + size));
20281 return -EINVAL;
20282@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20283 if (want_flags != flags) {
20284 printk(KERN_WARNING
20285 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20286- current->comm, current->pid,
20287+ current->comm, task_pid_nr(current),
20288 cattr_name(want_flags),
20289 (unsigned long long)paddr,
20290 (unsigned long long)(paddr + size),
20291@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20292 free_memtype(paddr, paddr + size);
20293 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20294 " for %Lx-%Lx, got %s\n",
20295- current->comm, current->pid,
20296+ current->comm, task_pid_nr(current),
20297 cattr_name(want_flags),
20298 (unsigned long long)paddr,
20299 (unsigned long long)(paddr + size),
20300diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20301--- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20302+++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20303@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20304 int i;
20305 enum reason_type rv = OTHERS;
20306
20307- p = (unsigned char *)ins_addr;
20308+ p = (unsigned char *)ktla_ktva(ins_addr);
20309 p += skip_prefix(p, &prf);
20310 p += get_opcode(p, &opcode);
20311
20312@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20313 struct prefix_bits prf;
20314 int i;
20315
20316- p = (unsigned char *)ins_addr;
20317+ p = (unsigned char *)ktla_ktva(ins_addr);
20318 p += skip_prefix(p, &prf);
20319 p += get_opcode(p, &opcode);
20320
20321@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20322 struct prefix_bits prf;
20323 int i;
20324
20325- p = (unsigned char *)ins_addr;
20326+ p = (unsigned char *)ktla_ktva(ins_addr);
20327 p += skip_prefix(p, &prf);
20328 p += get_opcode(p, &opcode);
20329
20330@@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20331 int i;
20332 unsigned long rv;
20333
20334- p = (unsigned char *)ins_addr;
20335+ p = (unsigned char *)ktla_ktva(ins_addr);
20336 p += skip_prefix(p, &prf);
20337 p += get_opcode(p, &opcode);
20338 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20339@@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20340 int i;
20341 unsigned long rv;
20342
20343- p = (unsigned char *)ins_addr;
20344+ p = (unsigned char *)ktla_ktva(ins_addr);
20345 p += skip_prefix(p, &prf);
20346 p += get_opcode(p, &opcode);
20347 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20348diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20349--- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20350+++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20351@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20352 return;
20353 }
20354 pte = pte_offset_kernel(pmd, vaddr);
20355+
20356+ pax_open_kernel();
20357 if (pte_val(pteval))
20358 set_pte_at(&init_mm, vaddr, pte, pteval);
20359 else
20360 pte_clear(&init_mm, vaddr, pte);
20361+ pax_close_kernel();
20362
20363 /*
20364 * It's enough to flush this one mapping.
20365diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20366--- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20367+++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20368@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20369 list_del(&page->lru);
20370 }
20371
20372-#define UNSHARED_PTRS_PER_PGD \
20373- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20374+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20375+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20376
20377+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20378+{
20379+ while (count--)
20380+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20381+}
20382+#endif
20383+
20384+#ifdef CONFIG_PAX_PER_CPU_PGD
20385+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20386+{
20387+ while (count--)
20388+
20389+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20390+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20391+#else
20392+ *dst++ = *src++;
20393+#endif
20394
20395+}
20396+#endif
20397+
20398+#ifdef CONFIG_X86_64
20399+#define pxd_t pud_t
20400+#define pyd_t pgd_t
20401+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20402+#define pxd_free(mm, pud) pud_free((mm), (pud))
20403+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20404+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20405+#define PYD_SIZE PGDIR_SIZE
20406+#else
20407+#define pxd_t pmd_t
20408+#define pyd_t pud_t
20409+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20410+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20411+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20412+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20413+#define PYD_SIZE PUD_SIZE
20414+#endif
20415+
20416+#ifdef CONFIG_PAX_PER_CPU_PGD
20417+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20418+static inline void pgd_dtor(pgd_t *pgd) {}
20419+#else
20420 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20421 {
20422 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20423@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20424 pgd_list_del(pgd);
20425 spin_unlock(&pgd_lock);
20426 }
20427+#endif
20428
20429 /*
20430 * List of all pgd's needed for non-PAE so it can invalidate entries
20431@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20432 * -- wli
20433 */
20434
20435-#ifdef CONFIG_X86_PAE
20436+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20437 /*
20438 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20439 * updating the top-level pagetable entries to guarantee the
20440@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20441 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20442 * and initialize the kernel pmds here.
20443 */
20444-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20445+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20446
20447 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20448 {
20449@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20450 */
20451 flush_tlb_mm(mm);
20452 }
20453+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20454+#define PREALLOCATED_PXDS USER_PGD_PTRS
20455 #else /* !CONFIG_X86_PAE */
20456
20457 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20458-#define PREALLOCATED_PMDS 0
20459+#define PREALLOCATED_PXDS 0
20460
20461 #endif /* CONFIG_X86_PAE */
20462
20463-static void free_pmds(pmd_t *pmds[])
20464+static void free_pxds(pxd_t *pxds[])
20465 {
20466 int i;
20467
20468- for(i = 0; i < PREALLOCATED_PMDS; i++)
20469- if (pmds[i])
20470- free_page((unsigned long)pmds[i]);
20471+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20472+ if (pxds[i])
20473+ free_page((unsigned long)pxds[i]);
20474 }
20475
20476-static int preallocate_pmds(pmd_t *pmds[])
20477+static int preallocate_pxds(pxd_t *pxds[])
20478 {
20479 int i;
20480 bool failed = false;
20481
20482- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20483- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20484- if (pmd == NULL)
20485+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20486+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20487+ if (pxd == NULL)
20488 failed = true;
20489- pmds[i] = pmd;
20490+ pxds[i] = pxd;
20491 }
20492
20493 if (failed) {
20494- free_pmds(pmds);
20495+ free_pxds(pxds);
20496 return -ENOMEM;
20497 }
20498
20499@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20500 * preallocate which never got a corresponding vma will need to be
20501 * freed manually.
20502 */
20503-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20504+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20505 {
20506 int i;
20507
20508- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20509+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20510 pgd_t pgd = pgdp[i];
20511
20512 if (pgd_val(pgd) != 0) {
20513- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20514+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20515
20516- pgdp[i] = native_make_pgd(0);
20517+ set_pgd(pgdp + i, native_make_pgd(0));
20518
20519- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20520- pmd_free(mm, pmd);
20521+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20522+ pxd_free(mm, pxd);
20523 }
20524 }
20525 }
20526
20527-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20528+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20529 {
20530- pud_t *pud;
20531+ pyd_t *pyd;
20532 unsigned long addr;
20533 int i;
20534
20535- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20536+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20537 return;
20538
20539- pud = pud_offset(pgd, 0);
20540+#ifdef CONFIG_X86_64
20541+ pyd = pyd_offset(mm, 0L);
20542+#else
20543+ pyd = pyd_offset(pgd, 0L);
20544+#endif
20545
20546- for (addr = i = 0; i < PREALLOCATED_PMDS;
20547- i++, pud++, addr += PUD_SIZE) {
20548- pmd_t *pmd = pmds[i];
20549+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20550+ i++, pyd++, addr += PYD_SIZE) {
20551+ pxd_t *pxd = pxds[i];
20552
20553 if (i >= KERNEL_PGD_BOUNDARY)
20554- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20555- sizeof(pmd_t) * PTRS_PER_PMD);
20556+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20557+ sizeof(pxd_t) * PTRS_PER_PMD);
20558
20559- pud_populate(mm, pud, pmd);
20560+ pyd_populate(mm, pyd, pxd);
20561 }
20562 }
20563
20564 pgd_t *pgd_alloc(struct mm_struct *mm)
20565 {
20566 pgd_t *pgd;
20567- pmd_t *pmds[PREALLOCATED_PMDS];
20568+ pxd_t *pxds[PREALLOCATED_PXDS];
20569
20570 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20571
20572@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20573
20574 mm->pgd = pgd;
20575
20576- if (preallocate_pmds(pmds) != 0)
20577+ if (preallocate_pxds(pxds) != 0)
20578 goto out_free_pgd;
20579
20580 if (paravirt_pgd_alloc(mm) != 0)
20581- goto out_free_pmds;
20582+ goto out_free_pxds;
20583
20584 /*
20585 * Make sure that pre-populating the pmds is atomic with
20586@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20587 spin_lock(&pgd_lock);
20588
20589 pgd_ctor(mm, pgd);
20590- pgd_prepopulate_pmd(mm, pgd, pmds);
20591+ pgd_prepopulate_pxd(mm, pgd, pxds);
20592
20593 spin_unlock(&pgd_lock);
20594
20595 return pgd;
20596
20597-out_free_pmds:
20598- free_pmds(pmds);
20599+out_free_pxds:
20600+ free_pxds(pxds);
20601 out_free_pgd:
20602 free_page((unsigned long)pgd);
20603 out:
20604@@ -295,7 +344,7 @@ out:
20605
20606 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20607 {
20608- pgd_mop_up_pmds(mm, pgd);
20609+ pgd_mop_up_pxds(mm, pgd);
20610 pgd_dtor(pgd);
20611 paravirt_pgd_free(mm, pgd);
20612 free_page((unsigned long)pgd);
20613diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20614--- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20615+++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20616@@ -5,8 +5,10 @@
20617 #include <asm/pgtable.h>
20618 #include <asm/proto.h>
20619
20620+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20621 static int disable_nx __cpuinitdata;
20622
20623+#ifndef CONFIG_PAX_PAGEEXEC
20624 /*
20625 * noexec = on|off
20626 *
20627@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20628 return 0;
20629 }
20630 early_param("noexec", noexec_setup);
20631+#endif
20632+
20633+#endif
20634
20635 void __cpuinit x86_configure_nx(void)
20636 {
20637+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20638 if (cpu_has_nx && !disable_nx)
20639 __supported_pte_mask |= _PAGE_NX;
20640 else
20641+#endif
20642 __supported_pte_mask &= ~_PAGE_NX;
20643 }
20644
20645diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20646--- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20647+++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20648@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20649 BUG();
20650 cpumask_clear_cpu(cpu,
20651 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20652+
20653+#ifndef CONFIG_PAX_PER_CPU_PGD
20654 load_cr3(swapper_pg_dir);
20655+#endif
20656+
20657 }
20658 EXPORT_SYMBOL_GPL(leave_mm);
20659
20660diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20661--- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20662+++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20663@@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20664 struct stack_frame_ia32 *fp;
20665
20666 /* Also check accessibility of one struct frame_head beyond */
20667- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20668+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20669 return NULL;
20670 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20671 return NULL;
20672@@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20673 {
20674 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20675
20676- if (!user_mode_vm(regs)) {
20677+ if (!user_mode(regs)) {
20678 unsigned long stack = kernel_stack_pointer(regs);
20679 if (depth)
20680 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20681diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20682--- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20683+++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20684@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20685 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20686 pci_mmcfg_late_init();
20687 pcibios_enable_irq = mrst_pci_irq_enable;
20688- pci_root_ops = pci_mrst_ops;
20689+ pax_open_kernel();
20690+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20691+ pax_close_kernel();
20692 /* Continue with standard init */
20693 return 1;
20694 }
20695diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20696--- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20697+++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20698@@ -79,50 +79,93 @@ union bios32 {
20699 static struct {
20700 unsigned long address;
20701 unsigned short segment;
20702-} bios32_indirect = { 0, __KERNEL_CS };
20703+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20704
20705 /*
20706 * Returns the entry point for the given service, NULL on error
20707 */
20708
20709-static unsigned long bios32_service(unsigned long service)
20710+static unsigned long __devinit bios32_service(unsigned long service)
20711 {
20712 unsigned char return_code; /* %al */
20713 unsigned long address; /* %ebx */
20714 unsigned long length; /* %ecx */
20715 unsigned long entry; /* %edx */
20716 unsigned long flags;
20717+ struct desc_struct d, *gdt;
20718
20719 local_irq_save(flags);
20720- __asm__("lcall *(%%edi); cld"
20721+
20722+ gdt = get_cpu_gdt_table(smp_processor_id());
20723+
20724+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20725+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20726+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20727+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20728+
20729+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20730 : "=a" (return_code),
20731 "=b" (address),
20732 "=c" (length),
20733 "=d" (entry)
20734 : "0" (service),
20735 "1" (0),
20736- "D" (&bios32_indirect));
20737+ "D" (&bios32_indirect),
20738+ "r"(__PCIBIOS_DS)
20739+ : "memory");
20740+
20741+ pax_open_kernel();
20742+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20743+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20744+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20745+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20746+ pax_close_kernel();
20747+
20748 local_irq_restore(flags);
20749
20750 switch (return_code) {
20751- case 0:
20752- return address + entry;
20753- case 0x80: /* Not present */
20754- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20755- return 0;
20756- default: /* Shouldn't happen */
20757- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20758- service, return_code);
20759+ case 0: {
20760+ int cpu;
20761+ unsigned char flags;
20762+
20763+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20764+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20765+ printk(KERN_WARNING "bios32_service: not valid\n");
20766 return 0;
20767+ }
20768+ address = address + PAGE_OFFSET;
20769+ length += 16UL; /* some BIOSs underreport this... */
20770+ flags = 4;
20771+ if (length >= 64*1024*1024) {
20772+ length >>= PAGE_SHIFT;
20773+ flags |= 8;
20774+ }
20775+
20776+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20777+ gdt = get_cpu_gdt_table(cpu);
20778+ pack_descriptor(&d, address, length, 0x9b, flags);
20779+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20780+ pack_descriptor(&d, address, length, 0x93, flags);
20781+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20782+ }
20783+ return entry;
20784+ }
20785+ case 0x80: /* Not present */
20786+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20787+ return 0;
20788+ default: /* Shouldn't happen */
20789+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20790+ service, return_code);
20791+ return 0;
20792 }
20793 }
20794
20795 static struct {
20796 unsigned long address;
20797 unsigned short segment;
20798-} pci_indirect = { 0, __KERNEL_CS };
20799+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20800
20801-static int pci_bios_present;
20802+static int pci_bios_present __read_only;
20803
20804 static int __devinit check_pcibios(void)
20805 {
20806@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20807 unsigned long flags, pcibios_entry;
20808
20809 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20810- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20811+ pci_indirect.address = pcibios_entry;
20812
20813 local_irq_save(flags);
20814- __asm__(
20815- "lcall *(%%edi); cld\n\t"
20816+ __asm__("movw %w6, %%ds\n\t"
20817+ "lcall *%%ss:(%%edi); cld\n\t"
20818+ "push %%ss\n\t"
20819+ "pop %%ds\n\t"
20820 "jc 1f\n\t"
20821 "xor %%ah, %%ah\n"
20822 "1:"
20823@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20824 "=b" (ebx),
20825 "=c" (ecx)
20826 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20827- "D" (&pci_indirect)
20828+ "D" (&pci_indirect),
20829+ "r" (__PCIBIOS_DS)
20830 : "memory");
20831 local_irq_restore(flags);
20832
20833@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20834
20835 switch (len) {
20836 case 1:
20837- __asm__("lcall *(%%esi); cld\n\t"
20838+ __asm__("movw %w6, %%ds\n\t"
20839+ "lcall *%%ss:(%%esi); cld\n\t"
20840+ "push %%ss\n\t"
20841+ "pop %%ds\n\t"
20842 "jc 1f\n\t"
20843 "xor %%ah, %%ah\n"
20844 "1:"
20845@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20846 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20847 "b" (bx),
20848 "D" ((long)reg),
20849- "S" (&pci_indirect));
20850+ "S" (&pci_indirect),
20851+ "r" (__PCIBIOS_DS));
20852 /*
20853 * Zero-extend the result beyond 8 bits, do not trust the
20854 * BIOS having done it:
20855@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20856 *value &= 0xff;
20857 break;
20858 case 2:
20859- __asm__("lcall *(%%esi); cld\n\t"
20860+ __asm__("movw %w6, %%ds\n\t"
20861+ "lcall *%%ss:(%%esi); cld\n\t"
20862+ "push %%ss\n\t"
20863+ "pop %%ds\n\t"
20864 "jc 1f\n\t"
20865 "xor %%ah, %%ah\n"
20866 "1:"
20867@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20868 : "1" (PCIBIOS_READ_CONFIG_WORD),
20869 "b" (bx),
20870 "D" ((long)reg),
20871- "S" (&pci_indirect));
20872+ "S" (&pci_indirect),
20873+ "r" (__PCIBIOS_DS));
20874 /*
20875 * Zero-extend the result beyond 16 bits, do not trust the
20876 * BIOS having done it:
20877@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20878 *value &= 0xffff;
20879 break;
20880 case 4:
20881- __asm__("lcall *(%%esi); cld\n\t"
20882+ __asm__("movw %w6, %%ds\n\t"
20883+ "lcall *%%ss:(%%esi); cld\n\t"
20884+ "push %%ss\n\t"
20885+ "pop %%ds\n\t"
20886 "jc 1f\n\t"
20887 "xor %%ah, %%ah\n"
20888 "1:"
20889@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20890 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20891 "b" (bx),
20892 "D" ((long)reg),
20893- "S" (&pci_indirect));
20894+ "S" (&pci_indirect),
20895+ "r" (__PCIBIOS_DS));
20896 break;
20897 }
20898
20899@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20900
20901 switch (len) {
20902 case 1:
20903- __asm__("lcall *(%%esi); cld\n\t"
20904+ __asm__("movw %w6, %%ds\n\t"
20905+ "lcall *%%ss:(%%esi); cld\n\t"
20906+ "push %%ss\n\t"
20907+ "pop %%ds\n\t"
20908 "jc 1f\n\t"
20909 "xor %%ah, %%ah\n"
20910 "1:"
20911@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20912 "c" (value),
20913 "b" (bx),
20914 "D" ((long)reg),
20915- "S" (&pci_indirect));
20916+ "S" (&pci_indirect),
20917+ "r" (__PCIBIOS_DS));
20918 break;
20919 case 2:
20920- __asm__("lcall *(%%esi); cld\n\t"
20921+ __asm__("movw %w6, %%ds\n\t"
20922+ "lcall *%%ss:(%%esi); cld\n\t"
20923+ "push %%ss\n\t"
20924+ "pop %%ds\n\t"
20925 "jc 1f\n\t"
20926 "xor %%ah, %%ah\n"
20927 "1:"
20928@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20929 "c" (value),
20930 "b" (bx),
20931 "D" ((long)reg),
20932- "S" (&pci_indirect));
20933+ "S" (&pci_indirect),
20934+ "r" (__PCIBIOS_DS));
20935 break;
20936 case 4:
20937- __asm__("lcall *(%%esi); cld\n\t"
20938+ __asm__("movw %w6, %%ds\n\t"
20939+ "lcall *%%ss:(%%esi); cld\n\t"
20940+ "push %%ss\n\t"
20941+ "pop %%ds\n\t"
20942 "jc 1f\n\t"
20943 "xor %%ah, %%ah\n"
20944 "1:"
20945@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20946 "c" (value),
20947 "b" (bx),
20948 "D" ((long)reg),
20949- "S" (&pci_indirect));
20950+ "S" (&pci_indirect),
20951+ "r" (__PCIBIOS_DS));
20952 break;
20953 }
20954
20955@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20956
20957 DBG("PCI: Fetching IRQ routing table... ");
20958 __asm__("push %%es\n\t"
20959+ "movw %w8, %%ds\n\t"
20960 "push %%ds\n\t"
20961 "pop %%es\n\t"
20962- "lcall *(%%esi); cld\n\t"
20963+ "lcall *%%ss:(%%esi); cld\n\t"
20964 "pop %%es\n\t"
20965+ "push %%ss\n\t"
20966+ "pop %%ds\n"
20967 "jc 1f\n\t"
20968 "xor %%ah, %%ah\n"
20969 "1:"
20970@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20971 "1" (0),
20972 "D" ((long) &opt),
20973 "S" (&pci_indirect),
20974- "m" (opt)
20975+ "m" (opt),
20976+ "r" (__PCIBIOS_DS)
20977 : "memory");
20978 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20979 if (ret & 0xff00)
20980@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20981 {
20982 int ret;
20983
20984- __asm__("lcall *(%%esi); cld\n\t"
20985+ __asm__("movw %w5, %%ds\n\t"
20986+ "lcall *%%ss:(%%esi); cld\n\t"
20987+ "push %%ss\n\t"
20988+ "pop %%ds\n"
20989 "jc 1f\n\t"
20990 "xor %%ah, %%ah\n"
20991 "1:"
20992@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20993 : "0" (PCIBIOS_SET_PCI_HW_INT),
20994 "b" ((dev->bus->number << 8) | dev->devfn),
20995 "c" ((irq << 8) | (pin + 10)),
20996- "S" (&pci_indirect));
20997+ "S" (&pci_indirect),
20998+ "r" (__PCIBIOS_DS));
20999 return !(ret & 0xff00);
21000 }
21001 EXPORT_SYMBOL(pcibios_set_irq_routing);
21002diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21003--- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21004+++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21005@@ -38,70 +38,37 @@
21006 */
21007
21008 static unsigned long efi_rt_eflags;
21009-static pgd_t efi_bak_pg_dir_pointer[2];
21010+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21011
21012-void efi_call_phys_prelog(void)
21013+void __init efi_call_phys_prelog(void)
21014 {
21015- unsigned long cr4;
21016- unsigned long temp;
21017 struct desc_ptr gdt_descr;
21018
21019 local_irq_save(efi_rt_eflags);
21020
21021- /*
21022- * If I don't have PAE, I should just duplicate two entries in page
21023- * directory. If I have PAE, I just need to duplicate one entry in
21024- * page directory.
21025- */
21026- cr4 = read_cr4_safe();
21027-
21028- if (cr4 & X86_CR4_PAE) {
21029- efi_bak_pg_dir_pointer[0].pgd =
21030- swapper_pg_dir[pgd_index(0)].pgd;
21031- swapper_pg_dir[0].pgd =
21032- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21033- } else {
21034- efi_bak_pg_dir_pointer[0].pgd =
21035- swapper_pg_dir[pgd_index(0)].pgd;
21036- efi_bak_pg_dir_pointer[1].pgd =
21037- swapper_pg_dir[pgd_index(0x400000)].pgd;
21038- swapper_pg_dir[pgd_index(0)].pgd =
21039- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21040- temp = PAGE_OFFSET + 0x400000;
21041- swapper_pg_dir[pgd_index(0x400000)].pgd =
21042- swapper_pg_dir[pgd_index(temp)].pgd;
21043- }
21044+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21045+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21046+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21047
21048 /*
21049 * After the lock is released, the original page table is restored.
21050 */
21051 __flush_tlb_all();
21052
21053- gdt_descr.address = __pa(get_cpu_gdt_table(0));
21054+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21055 gdt_descr.size = GDT_SIZE - 1;
21056 load_gdt(&gdt_descr);
21057 }
21058
21059-void efi_call_phys_epilog(void)
21060+void __init efi_call_phys_epilog(void)
21061 {
21062- unsigned long cr4;
21063 struct desc_ptr gdt_descr;
21064
21065- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21066+ gdt_descr.address = get_cpu_gdt_table(0);
21067 gdt_descr.size = GDT_SIZE - 1;
21068 load_gdt(&gdt_descr);
21069
21070- cr4 = read_cr4_safe();
21071-
21072- if (cr4 & X86_CR4_PAE) {
21073- swapper_pg_dir[pgd_index(0)].pgd =
21074- efi_bak_pg_dir_pointer[0].pgd;
21075- } else {
21076- swapper_pg_dir[pgd_index(0)].pgd =
21077- efi_bak_pg_dir_pointer[0].pgd;
21078- swapper_pg_dir[pgd_index(0x400000)].pgd =
21079- efi_bak_pg_dir_pointer[1].pgd;
21080- }
21081+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21082
21083 /*
21084 * After the lock is released, the original page table is restored.
21085diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21086--- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21087+++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21088@@ -6,6 +6,7 @@
21089 */
21090
21091 #include <linux/linkage.h>
21092+#include <linux/init.h>
21093 #include <asm/page_types.h>
21094
21095 /*
21096@@ -20,7 +21,7 @@
21097 * service functions will comply with gcc calling convention, too.
21098 */
21099
21100-.text
21101+__INIT
21102 ENTRY(efi_call_phys)
21103 /*
21104 * 0. The function can only be called in Linux kernel. So CS has been
21105@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21106 * The mapping of lower virtual memory has been created in prelog and
21107 * epilog.
21108 */
21109- movl $1f, %edx
21110- subl $__PAGE_OFFSET, %edx
21111- jmp *%edx
21112+ jmp 1f-__PAGE_OFFSET
21113 1:
21114
21115 /*
21116@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21117 * parameter 2, ..., param n. To make things easy, we save the return
21118 * address of efi_call_phys in a global variable.
21119 */
21120- popl %edx
21121- movl %edx, saved_return_addr
21122- /* get the function pointer into ECX*/
21123- popl %ecx
21124- movl %ecx, efi_rt_function_ptr
21125- movl $2f, %edx
21126- subl $__PAGE_OFFSET, %edx
21127- pushl %edx
21128+ popl (saved_return_addr)
21129+ popl (efi_rt_function_ptr)
21130
21131 /*
21132 * 3. Clear PG bit in %CR0.
21133@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21134 /*
21135 * 5. Call the physical function.
21136 */
21137- jmp *%ecx
21138+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21139
21140-2:
21141 /*
21142 * 6. After EFI runtime service returns, control will return to
21143 * following instruction. We'd better readjust stack pointer first.
21144@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21145 movl %cr0, %edx
21146 orl $0x80000000, %edx
21147 movl %edx, %cr0
21148- jmp 1f
21149-1:
21150+
21151 /*
21152 * 8. Now restore the virtual mode from flat mode by
21153 * adding EIP with PAGE_OFFSET.
21154 */
21155- movl $1f, %edx
21156- jmp *%edx
21157+ jmp 1f+__PAGE_OFFSET
21158 1:
21159
21160 /*
21161 * 9. Balance the stack. And because EAX contain the return value,
21162 * we'd better not clobber it.
21163 */
21164- leal efi_rt_function_ptr, %edx
21165- movl (%edx), %ecx
21166- pushl %ecx
21167+ pushl (efi_rt_function_ptr)
21168
21169 /*
21170- * 10. Push the saved return address onto the stack and return.
21171+ * 10. Return to the saved return address.
21172 */
21173- leal saved_return_addr, %edx
21174- movl (%edx), %ecx
21175- pushl %ecx
21176- ret
21177+ jmpl *(saved_return_addr)
21178 ENDPROC(efi_call_phys)
21179 .previous
21180
21181-.data
21182+__INITDATA
21183 saved_return_addr:
21184 .long 0
21185 efi_rt_function_ptr:
21186diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21187--- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21188+++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21189@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21190 }
21191
21192 /* Reboot and power off are handled by the SCU on a MID device */
21193-static void mrst_power_off(void)
21194+static __noreturn void mrst_power_off(void)
21195 {
21196 intel_scu_ipc_simple_command(0xf1, 1);
21197+ BUG();
21198 }
21199
21200-static void mrst_reboot(void)
21201+static __noreturn void mrst_reboot(void)
21202 {
21203 intel_scu_ipc_simple_command(0xf1, 0);
21204+ BUG();
21205 }
21206
21207 /*
21208diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21209--- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21210+++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21211@@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21212 cpumask_t mask;
21213 struct reset_args reset_args;
21214
21215+ pax_track_stack();
21216+
21217 reset_args.sender = sender;
21218
21219 cpus_clear(mask);
21220diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21221--- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21222+++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21223@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21224 static void fix_processor_context(void)
21225 {
21226 int cpu = smp_processor_id();
21227- struct tss_struct *t = &per_cpu(init_tss, cpu);
21228+ struct tss_struct *t = init_tss + cpu;
21229
21230 set_tss_desc(cpu, t); /*
21231 * This just modifies memory; should not be
21232@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21233 */
21234
21235 #ifdef CONFIG_X86_64
21236+ pax_open_kernel();
21237 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21238+ pax_close_kernel();
21239
21240 syscall_init(); /* This sets MSR_*STAR and related */
21241 #endif
21242Binary files linux-2.6.39.4/arch/x86/tools/test_get_len and linux-2.6.39.4/arch/x86/tools/test_get_len differ
21243diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21244--- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21245+++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21246@@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21247 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21248 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21249
21250-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21251+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21252 GCOV_PROFILE := n
21253
21254 #
21255diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21256--- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21257+++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21258@@ -22,24 +22,48 @@
21259 #include <asm/hpet.h>
21260 #include <asm/unistd.h>
21261 #include <asm/io.h>
21262+#include <asm/fixmap.h>
21263 #include "vextern.h"
21264
21265 #define gtod vdso_vsyscall_gtod_data
21266
21267+notrace noinline long __vdso_fallback_time(long *t)
21268+{
21269+ long secs;
21270+ asm volatile("syscall"
21271+ : "=a" (secs)
21272+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21273+ return secs;
21274+}
21275+
21276 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21277 {
21278 long ret;
21279 asm("syscall" : "=a" (ret) :
21280- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21281+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21282 return ret;
21283 }
21284
21285+notrace static inline cycle_t __vdso_vread_hpet(void)
21286+{
21287+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21288+}
21289+
21290+notrace static inline cycle_t __vdso_vread_tsc(void)
21291+{
21292+ cycle_t ret = (cycle_t)vget_cycles();
21293+
21294+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21295+}
21296+
21297 notrace static inline long vgetns(void)
21298 {
21299 long v;
21300- cycles_t (*vread)(void);
21301- vread = gtod->clock.vread;
21302- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21303+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21304+ v = __vdso_vread_tsc();
21305+ else
21306+ v = __vdso_vread_hpet();
21307+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21308 return (v * gtod->clock.mult) >> gtod->clock.shift;
21309 }
21310
21311@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21312
21313 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21314 {
21315- if (likely(gtod->sysctl_enabled))
21316+ if (likely(gtod->sysctl_enabled &&
21317+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21318+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21319 switch (clock) {
21320 case CLOCK_REALTIME:
21321 if (likely(gtod->clock.vread))
21322@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21323 int clock_gettime(clockid_t, struct timespec *)
21324 __attribute__((weak, alias("__vdso_clock_gettime")));
21325
21326-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21327+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21328 {
21329 long ret;
21330- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21331+ asm("syscall" : "=a" (ret) :
21332+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21333+ return ret;
21334+}
21335+
21336+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21337+{
21338+ if (likely(gtod->sysctl_enabled &&
21339+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21340+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21341+ {
21342 if (likely(tv != NULL)) {
21343 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21344 offsetof(struct timespec, tv_nsec) ||
21345@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21346 }
21347 return 0;
21348 }
21349- asm("syscall" : "=a" (ret) :
21350- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21351- return ret;
21352+ return __vdso_fallback_gettimeofday(tv, tz);
21353 }
21354 int gettimeofday(struct timeval *, struct timezone *)
21355 __attribute__((weak, alias("__vdso_gettimeofday")));
21356diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21357--- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21358+++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21359@@ -25,6 +25,7 @@
21360 #include <asm/tlbflush.h>
21361 #include <asm/vdso.h>
21362 #include <asm/proto.h>
21363+#include <asm/mman.h>
21364
21365 enum {
21366 VDSO_DISABLED = 0,
21367@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21368 void enable_sep_cpu(void)
21369 {
21370 int cpu = get_cpu();
21371- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21372+ struct tss_struct *tss = init_tss + cpu;
21373
21374 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21375 put_cpu();
21376@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21377 gate_vma.vm_start = FIXADDR_USER_START;
21378 gate_vma.vm_end = FIXADDR_USER_END;
21379 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21380- gate_vma.vm_page_prot = __P101;
21381+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21382 /*
21383 * Make sure the vDSO gets into every core dump.
21384 * Dumping its contents makes post-mortem fully interpretable later
21385@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21386 if (compat)
21387 addr = VDSO_HIGH_BASE;
21388 else {
21389- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21390+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21391 if (IS_ERR_VALUE(addr)) {
21392 ret = addr;
21393 goto up_fail;
21394 }
21395 }
21396
21397- current->mm->context.vdso = (void *)addr;
21398+ current->mm->context.vdso = addr;
21399
21400 if (compat_uses_vma || !compat) {
21401 /*
21402@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21403 }
21404
21405 current_thread_info()->sysenter_return =
21406- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21407+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21408
21409 up_fail:
21410 if (ret)
21411- current->mm->context.vdso = NULL;
21412+ current->mm->context.vdso = 0;
21413
21414 up_write(&mm->mmap_sem);
21415
21416@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21417
21418 const char *arch_vma_name(struct vm_area_struct *vma)
21419 {
21420- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21421+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21422 return "[vdso]";
21423+
21424+#ifdef CONFIG_PAX_SEGMEXEC
21425+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21426+ return "[vdso]";
21427+#endif
21428+
21429 return NULL;
21430 }
21431
21432@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21433 * Check to see if the corresponding task was created in compat vdso
21434 * mode.
21435 */
21436- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21437+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21438 return &gate_vma;
21439 return NULL;
21440 }
21441diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21442--- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21443+++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21444@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21445 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21446 #include "vextern.h"
21447 #undef VEXTERN
21448+
21449+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21450+VEXTERN(fallback_gettimeofday)
21451+VEXTERN(fallback_time)
21452+VEXTERN(getcpu)
21453+#undef VEXTERN
21454diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21455--- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21456+++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21457@@ -11,6 +11,5 @@
21458 put into vextern.h and be referenced as a pointer with vdso prefix.
21459 The main kernel later fills in the values. */
21460
21461-VEXTERN(jiffies)
21462 VEXTERN(vgetcpu_mode)
21463 VEXTERN(vsyscall_gtod_data)
21464diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21465--- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21466+++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21467@@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21468 if (!vbase)
21469 goto oom;
21470
21471- if (memcmp(vbase, "\177ELF", 4)) {
21472+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
21473 printk("VDSO: I'm broken; not ELF\n");
21474 vdso_enabled = 0;
21475 }
21476@@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21477 goto up_fail;
21478 }
21479
21480- current->mm->context.vdso = (void *)addr;
21481+ current->mm->context.vdso = addr;
21482
21483 ret = install_special_mapping(mm, addr, vdso_size,
21484 VM_READ|VM_EXEC|
21485@@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21486 VM_ALWAYSDUMP,
21487 vdso_pages);
21488 if (ret) {
21489- current->mm->context.vdso = NULL;
21490+ current->mm->context.vdso = 0;
21491 goto up_fail;
21492 }
21493
21494@@ -134,10 +134,3 @@ up_fail:
21495 up_write(&mm->mmap_sem);
21496 return ret;
21497 }
21498-
21499-static __init int vdso_setup(char *s)
21500-{
21501- vdso_enabled = simple_strtoul(s, NULL, 0);
21502- return 0;
21503-}
21504-__setup("vdso=", vdso_setup);
21505diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21506--- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21507+++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21508@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21509
21510 struct shared_info xen_dummy_shared_info;
21511
21512-void *xen_initial_gdt;
21513-
21514 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21515 __read_mostly int xen_have_vector_callback;
21516 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21517@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21518 #endif
21519 };
21520
21521-static void xen_reboot(int reason)
21522+static __noreturn void xen_reboot(int reason)
21523 {
21524 struct sched_shutdown r = { .reason = reason };
21525
21526@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21527 BUG();
21528 }
21529
21530-static void xen_restart(char *msg)
21531+static __noreturn void xen_restart(char *msg)
21532 {
21533 xen_reboot(SHUTDOWN_reboot);
21534 }
21535
21536-static void xen_emergency_restart(void)
21537+static __noreturn void xen_emergency_restart(void)
21538 {
21539 xen_reboot(SHUTDOWN_reboot);
21540 }
21541
21542-static void xen_machine_halt(void)
21543+static __noreturn void xen_machine_halt(void)
21544 {
21545 xen_reboot(SHUTDOWN_poweroff);
21546 }
21547@@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21548 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21549
21550 /* Work out if we support NX */
21551- x86_configure_nx();
21552+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21553+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21554+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21555+ unsigned l, h;
21556+
21557+ __supported_pte_mask |= _PAGE_NX;
21558+ rdmsr(MSR_EFER, l, h);
21559+ l |= EFER_NX;
21560+ wrmsr(MSR_EFER, l, h);
21561+ }
21562+#endif
21563
21564 xen_setup_features();
21565
21566@@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21567
21568 machine_ops = xen_machine_ops;
21569
21570- /*
21571- * The only reliable way to retain the initial address of the
21572- * percpu gdt_page is to remember it here, so we can go and
21573- * mark it RW later, when the initial percpu area is freed.
21574- */
21575- xen_initial_gdt = &per_cpu(gdt_page, 0);
21576-
21577 xen_smp_init();
21578
21579 #ifdef CONFIG_ACPI_NUMA
21580diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21581--- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21582+++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21583@@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21584 convert_pfn_mfn(init_level4_pgt);
21585 convert_pfn_mfn(level3_ident_pgt);
21586 convert_pfn_mfn(level3_kernel_pgt);
21587+ convert_pfn_mfn(level3_vmalloc_pgt);
21588+ convert_pfn_mfn(level3_vmemmap_pgt);
21589
21590 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21591 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21592@@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21593 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21594 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21595 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21596+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21597+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21598 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21599+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21600 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21601 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21602
21603diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21604--- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21605+++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21606@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21607 {
21608 BUG_ON(smp_processor_id() != 0);
21609 native_smp_prepare_boot_cpu();
21610-
21611- /* We've switched to the "real" per-cpu gdt, so make sure the
21612- old memory can be recycled */
21613- make_lowmem_page_readwrite(xen_initial_gdt);
21614-
21615 xen_filter_cpu_maps();
21616 xen_setup_vcpu_info_placement();
21617 }
21618@@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21619 gdt = get_cpu_gdt_table(cpu);
21620
21621 ctxt->flags = VGCF_IN_KERNEL;
21622- ctxt->user_regs.ds = __USER_DS;
21623- ctxt->user_regs.es = __USER_DS;
21624+ ctxt->user_regs.ds = __KERNEL_DS;
21625+ ctxt->user_regs.es = __KERNEL_DS;
21626 ctxt->user_regs.ss = __KERNEL_DS;
21627 #ifdef CONFIG_X86_32
21628 ctxt->user_regs.fs = __KERNEL_PERCPU;
21629- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21630+ savesegment(gs, ctxt->user_regs.gs);
21631 #else
21632 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21633 #endif
21634@@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21635 int rc;
21636
21637 per_cpu(current_task, cpu) = idle;
21638+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
21639 #ifdef CONFIG_X86_32
21640 irq_ctx_init(cpu);
21641 #else
21642 clear_tsk_thread_flag(idle, TIF_FORK);
21643- per_cpu(kernel_stack, cpu) =
21644- (unsigned long)task_stack_page(idle) -
21645- KERNEL_STACK_OFFSET + THREAD_SIZE;
21646+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21647 #endif
21648 xen_setup_runstate_info(cpu);
21649 xen_setup_timer(cpu);
21650diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21651--- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21652+++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21653@@ -83,14 +83,14 @@ ENTRY(xen_iret)
21654 ESP_OFFSET=4 # bytes pushed onto stack
21655
21656 /*
21657- * Store vcpu_info pointer for easy access. Do it this way to
21658- * avoid having to reload %fs
21659+ * Store vcpu_info pointer for easy access.
21660 */
21661 #ifdef CONFIG_SMP
21662- GET_THREAD_INFO(%eax)
21663- movl TI_cpu(%eax), %eax
21664- movl __per_cpu_offset(,%eax,4), %eax
21665- mov xen_vcpu(%eax), %eax
21666+ push %fs
21667+ mov $(__KERNEL_PERCPU), %eax
21668+ mov %eax, %fs
21669+ mov PER_CPU_VAR(xen_vcpu), %eax
21670+ pop %fs
21671 #else
21672 movl xen_vcpu, %eax
21673 #endif
21674diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21675--- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21676+++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21677@@ -19,6 +19,17 @@ ENTRY(startup_xen)
21678 #ifdef CONFIG_X86_32
21679 mov %esi,xen_start_info
21680 mov $init_thread_union+THREAD_SIZE,%esp
21681+#ifdef CONFIG_SMP
21682+ movl $cpu_gdt_table,%edi
21683+ movl $__per_cpu_load,%eax
21684+ movw %ax,__KERNEL_PERCPU + 2(%edi)
21685+ rorl $16,%eax
21686+ movb %al,__KERNEL_PERCPU + 4(%edi)
21687+ movb %ah,__KERNEL_PERCPU + 7(%edi)
21688+ movl $__per_cpu_end - 1,%eax
21689+ subl $__per_cpu_start,%eax
21690+ movw %ax,__KERNEL_PERCPU + 0(%edi)
21691+#endif
21692 #else
21693 mov %rsi,xen_start_info
21694 mov $init_thread_union+THREAD_SIZE,%rsp
21695diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21696--- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21697+++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21698@@ -10,8 +10,6 @@
21699 extern const char xen_hypervisor_callback[];
21700 extern const char xen_failsafe_callback[];
21701
21702-extern void *xen_initial_gdt;
21703-
21704 struct trap_info;
21705 void xen_copy_trap_info(struct trap_info *traps);
21706
21707diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21708--- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21709+++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21710@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21711 }
21712 EXPORT_SYMBOL(blk_iopoll_complete);
21713
21714-static void blk_iopoll_softirq(struct softirq_action *h)
21715+static void blk_iopoll_softirq(void)
21716 {
21717 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21718 int rearm = 0, budget = blk_iopoll_budget;
21719diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21720--- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21721+++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21722@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21723 if (!len || !kbuf)
21724 return -EINVAL;
21725
21726- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21727+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21728 if (do_copy)
21729 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21730 else
21731diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21732--- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21733+++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21734@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21735 * Softirq action handler - move entries to local list and loop over them
21736 * while passing them to the queue registered handler.
21737 */
21738-static void blk_done_softirq(struct softirq_action *h)
21739+static void blk_done_softirq(void)
21740 {
21741 struct list_head *cpu_list, local_list;
21742
21743diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21744--- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21745+++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21746@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21747 struct sg_io_v4 *hdr, struct bsg_device *bd,
21748 fmode_t has_write_perm)
21749 {
21750+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21751+ unsigned char *cmdptr;
21752+
21753 if (hdr->request_len > BLK_MAX_CDB) {
21754 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21755 if (!rq->cmd)
21756 return -ENOMEM;
21757- }
21758+ cmdptr = rq->cmd;
21759+ } else
21760+ cmdptr = tmpcmd;
21761
21762- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21763+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21764 hdr->request_len))
21765 return -EFAULT;
21766
21767+ if (cmdptr != rq->cmd)
21768+ memcpy(rq->cmd, cmdptr, hdr->request_len);
21769+
21770 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21771 if (blk_verify_command(rq->cmd, has_write_perm))
21772 return -EPERM;
21773diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21774--- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21775+++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21776@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21777 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21778 struct sg_io_hdr *hdr, fmode_t mode)
21779 {
21780- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21781+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21782+ unsigned char *cmdptr;
21783+
21784+ if (rq->cmd != rq->__cmd)
21785+ cmdptr = rq->cmd;
21786+ else
21787+ cmdptr = tmpcmd;
21788+
21789+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21790 return -EFAULT;
21791+
21792+ if (cmdptr != rq->cmd)
21793+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21794+
21795 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21796 return -EPERM;
21797
21798@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21799 int err;
21800 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21801 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21802+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21803+ unsigned char *cmdptr;
21804
21805 if (!sic)
21806 return -EINVAL;
21807@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21808 */
21809 err = -EFAULT;
21810 rq->cmd_len = cmdlen;
21811- if (copy_from_user(rq->cmd, sic->data, cmdlen))
21812+
21813+ if (rq->cmd != rq->__cmd)
21814+ cmdptr = rq->cmd;
21815+ else
21816+ cmdptr = tmpcmd;
21817+
21818+ if (copy_from_user(cmdptr, sic->data, cmdlen))
21819 goto error;
21820
21821+ if (rq->cmd != cmdptr)
21822+ memcpy(rq->cmd, cmdptr, cmdlen);
21823+
21824 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21825 goto error;
21826
21827diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21828--- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21829+++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21830@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21831
21832 struct cryptd_blkcipher_request_ctx {
21833 crypto_completion_t complete;
21834-};
21835+} __no_const;
21836
21837 struct cryptd_hash_ctx {
21838 struct crypto_shash *child;
21839@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21840
21841 struct cryptd_aead_request_ctx {
21842 crypto_completion_t complete;
21843-};
21844+} __no_const;
21845
21846 static void cryptd_queue_worker(struct work_struct *work);
21847
21848diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21849--- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21850+++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21851@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21852 for (i = 0; i < 7; ++i)
21853 gf128mul_x_lle(&p[i + 1], &p[i]);
21854
21855- memset(r, 0, sizeof(r));
21856+ memset(r, 0, sizeof(*r));
21857 for (i = 0;;) {
21858 u8 ch = ((u8 *)b)[15 - i];
21859
21860@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21861 for (i = 0; i < 7; ++i)
21862 gf128mul_x_bbe(&p[i + 1], &p[i]);
21863
21864- memset(r, 0, sizeof(r));
21865+ memset(r, 0, sizeof(*r));
21866 for (i = 0;;) {
21867 u8 ch = ((u8 *)b)[i];
21868
21869diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21870--- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21871+++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21872@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21873 u32 r0,r1,r2,r3,r4;
21874 int i;
21875
21876+ pax_track_stack();
21877+
21878 /* Copy key, add padding */
21879
21880 for (i = 0; i < keylen; ++i)
21881diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21882--- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21883+++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21884@@ -1,13 +1,16 @@
21885 *.a
21886 *.aux
21887 *.bin
21888+*.cis
21889 *.cpio
21890 *.csp
21891+*.dbg
21892 *.dsp
21893 *.dvi
21894 *.elf
21895 *.eps
21896 *.fw
21897+*.gcno
21898 *.gen.S
21899 *.gif
21900 *.grep
21901@@ -38,8 +41,10 @@
21902 *.tab.h
21903 *.tex
21904 *.ver
21905+*.vim
21906 *.xml
21907 *_MODULES
21908+*_reg_safe.h
21909 *_vga16.c
21910 *~
21911 *.9
21912@@ -49,11 +54,16 @@
21913 53c700_d.h
21914 CVS
21915 ChangeSet
21916+GPATH
21917+GRTAGS
21918+GSYMS
21919+GTAGS
21920 Image
21921 Kerntypes
21922 Module.markers
21923 Module.symvers
21924 PENDING
21925+PERF*
21926 SCCS
21927 System.map*
21928 TAGS
21929@@ -80,8 +90,11 @@ btfixupprep
21930 build
21931 bvmlinux
21932 bzImage*
21933+capability_names.h
21934 capflags.c
21935 classlist.h*
21936+clut_vga16.c
21937+common-cmds.h
21938 comp*.log
21939 compile.h*
21940 conf
21941@@ -106,16 +119,19 @@ fore200e_mkfirm
21942 fore200e_pca_fw.c*
21943 gconf
21944 gen-devlist
21945+gen-kdb_cmds.c
21946 gen_crc32table
21947 gen_init_cpio
21948 generated
21949 genheaders
21950 genksyms
21951 *_gray256.c
21952+hash
21953 ihex2fw
21954 ikconfig.h*
21955 inat-tables.c
21956 initramfs_data.cpio
21957+initramfs_data.cpio.bz2
21958 initramfs_data.cpio.gz
21959 initramfs_list
21960 int16.c
21961@@ -125,7 +141,6 @@ int32.c
21962 int4.c
21963 int8.c
21964 kallsyms
21965-kconfig
21966 keywords.c
21967 ksym.c*
21968 ksym.h*
21969@@ -149,7 +164,9 @@ mkboot
21970 mkbugboot
21971 mkcpustr
21972 mkdep
21973+mkpiggy
21974 mkprep
21975+mkregtable
21976 mktables
21977 mktree
21978 modpost
21979@@ -165,6 +182,7 @@ parse.h
21980 patches*
21981 pca200e.bin
21982 pca200e_ecd.bin2
21983+perf-archive
21984 piggy.gz
21985 piggyback
21986 piggy.S
21987@@ -180,7 +198,9 @@ r600_reg_safe.h
21988 raid6altivec*.c
21989 raid6int*.c
21990 raid6tables.c
21991+regdb.c
21992 relocs
21993+rlim_names.h
21994 rn50_reg_safe.h
21995 rs600_reg_safe.h
21996 rv515_reg_safe.h
21997@@ -189,6 +209,7 @@ setup
21998 setup.bin
21999 setup.elf
22000 sImage
22001+slabinfo
22002 sm_tbl*
22003 split-include
22004 syscalltab.h
22005@@ -213,13 +234,17 @@ version.h*
22006 vmlinux
22007 vmlinux-*
22008 vmlinux.aout
22009+vmlinux.bin.all
22010+vmlinux.bin.bz2
22011 vmlinux.lds
22012+vmlinux.relocs
22013 voffset.h
22014 vsyscall.lds
22015 vsyscall_32.lds
22016 wanxlfw.inc
22017 uImage
22018 unifdef
22019+utsrelease.h
22020 wakeup.bin
22021 wakeup.elf
22022 wakeup.lds
22023diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22024--- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22025+++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22026@@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22027 the specified number of seconds. This is to be used if
22028 your oopses keep scrolling off the screen.
22029
22030+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22031+ virtualization environments that don't cope well with the
22032+ expand down segment used by UDEREF on X86-32 or the frequent
22033+ page table updates on X86-64.
22034+
22035+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22036+
22037 pcbit= [HW,ISDN]
22038
22039 pcd. [PARIDE]
22040diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22041--- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22042+++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22043@@ -38,12 +38,12 @@
22044 */
22045 u64 cper_next_record_id(void)
22046 {
22047- static atomic64_t seq;
22048+ static atomic64_unchecked_t seq;
22049
22050- if (!atomic64_read(&seq))
22051- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22052+ if (!atomic64_read_unchecked(&seq))
22053+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22054
22055- return atomic64_inc_return(&seq);
22056+ return atomic64_inc_return_unchecked(&seq);
22057 }
22058 EXPORT_SYMBOL_GPL(cper_next_record_id);
22059
22060diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22061--- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22062+++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22063@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22064 return res;
22065
22066 temp /= 1000;
22067- if (temp < 0)
22068- return -EINVAL;
22069
22070 mutex_lock(&resource->lock);
22071 resource->trip[attr->index - 7] = temp;
22072diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22073--- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22074+++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22075@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22076 size_t count, loff_t * ppos)
22077 {
22078 struct list_head *node, *next;
22079- char strbuf[5];
22080- char str[5] = "";
22081- unsigned int len = count;
22082-
22083- if (len > 4)
22084- len = 4;
22085- if (len < 0)
22086- return -EFAULT;
22087+ char strbuf[5] = {0};
22088
22089- if (copy_from_user(strbuf, buffer, len))
22090+ if (count > 4)
22091+ count = 4;
22092+ if (copy_from_user(strbuf, buffer, count))
22093 return -EFAULT;
22094- strbuf[len] = '\0';
22095- sscanf(strbuf, "%s", str);
22096+ strbuf[count] = '\0';
22097
22098 mutex_lock(&acpi_device_lock);
22099 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22100@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22101 if (!dev->wakeup.flags.valid)
22102 continue;
22103
22104- if (!strncmp(dev->pnp.bus_id, str, 4)) {
22105+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22106 if (device_can_wakeup(&dev->dev)) {
22107 bool enable = !device_may_wakeup(&dev->dev);
22108 device_set_wakeup_enable(&dev->dev, enable);
22109diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22110--- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22111+++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22112@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22113 return 0;
22114 #endif
22115
22116- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22117+ BUG_ON(pr->id >= nr_cpu_ids);
22118
22119 /*
22120 * Buggy BIOS check
22121diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22122--- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22123+++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22124@@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22125 struct ata_port *ap;
22126 unsigned int tag;
22127
22128- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22129+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22130 ap = qc->ap;
22131
22132 qc->flags = 0;
22133@@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22134 struct ata_port *ap;
22135 struct ata_link *link;
22136
22137- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22138+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22139 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22140 ap = qc->ap;
22141 link = qc->dev->link;
22142@@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22143 return;
22144
22145 spin_lock(&lock);
22146+ pax_open_kernel();
22147
22148 for (cur = ops->inherits; cur; cur = cur->inherits) {
22149 void **inherit = (void **)cur;
22150@@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22151 if (IS_ERR(*pp))
22152 *pp = NULL;
22153
22154- ops->inherits = NULL;
22155+ *(struct ata_port_operations **)&ops->inherits = NULL;
22156
22157+ pax_close_kernel();
22158 spin_unlock(&lock);
22159 }
22160
22161diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22162--- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22163+++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22164@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22165 {
22166 struct ata_link *link;
22167
22168+ pax_track_stack();
22169+
22170 ata_for_each_link(link, ap, HOST_FIRST)
22171 ata_eh_link_report(link);
22172 }
22173diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22174--- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22175+++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22176@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22177 /* Handle platform specific quirks */
22178 if (pdata->quirk) {
22179 if (pdata->quirk & CF_BROKEN_PIO) {
22180- ap->ops->set_piomode = NULL;
22181+ pax_open_kernel();
22182+ *(void **)&ap->ops->set_piomode = NULL;
22183+ pax_close_kernel();
22184 ap->pio_mask = 0;
22185 }
22186 if (pdata->quirk & CF_BROKEN_MWDMA)
22187diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22188--- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22189+++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22190@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22191 vcc->pop(vcc, skb);
22192 else
22193 dev_kfree_skb_any(skb);
22194- atomic_inc(&vcc->stats->tx);
22195+ atomic_inc_unchecked(&vcc->stats->tx);
22196
22197 return 0;
22198 }
22199diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22200--- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22201+++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22202@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22203 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22204
22205 // VC layer stats
22206- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22207+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22208
22209 // free the descriptor
22210 kfree (tx_descr);
22211@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22212 dump_skb ("<<<", vc, skb);
22213
22214 // VC layer stats
22215- atomic_inc(&atm_vcc->stats->rx);
22216+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22217 __net_timestamp(skb);
22218 // end of our responsibility
22219 atm_vcc->push (atm_vcc, skb);
22220@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22221 } else {
22222 PRINTK (KERN_INFO, "dropped over-size frame");
22223 // should we count this?
22224- atomic_inc(&atm_vcc->stats->rx_drop);
22225+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22226 }
22227
22228 } else {
22229@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22230 }
22231
22232 if (check_area (skb->data, skb->len)) {
22233- atomic_inc(&atm_vcc->stats->tx_err);
22234+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22235 return -ENOMEM; // ?
22236 }
22237
22238diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22239--- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22240+++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22241@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22242 if (vcc->pop) vcc->pop(vcc,skb);
22243 else dev_kfree_skb(skb);
22244 if (dev_data) return 0;
22245- atomic_inc(&vcc->stats->tx_err);
22246+ atomic_inc_unchecked(&vcc->stats->tx_err);
22247 return -ENOLINK;
22248 }
22249 size = skb->len+sizeof(struct atmtcp_hdr);
22250@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22251 if (!new_skb) {
22252 if (vcc->pop) vcc->pop(vcc,skb);
22253 else dev_kfree_skb(skb);
22254- atomic_inc(&vcc->stats->tx_err);
22255+ atomic_inc_unchecked(&vcc->stats->tx_err);
22256 return -ENOBUFS;
22257 }
22258 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22259@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22260 if (vcc->pop) vcc->pop(vcc,skb);
22261 else dev_kfree_skb(skb);
22262 out_vcc->push(out_vcc,new_skb);
22263- atomic_inc(&vcc->stats->tx);
22264- atomic_inc(&out_vcc->stats->rx);
22265+ atomic_inc_unchecked(&vcc->stats->tx);
22266+ atomic_inc_unchecked(&out_vcc->stats->rx);
22267 return 0;
22268 }
22269
22270@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22271 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22272 read_unlock(&vcc_sklist_lock);
22273 if (!out_vcc) {
22274- atomic_inc(&vcc->stats->tx_err);
22275+ atomic_inc_unchecked(&vcc->stats->tx_err);
22276 goto done;
22277 }
22278 skb_pull(skb,sizeof(struct atmtcp_hdr));
22279@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22280 __net_timestamp(new_skb);
22281 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22282 out_vcc->push(out_vcc,new_skb);
22283- atomic_inc(&vcc->stats->tx);
22284- atomic_inc(&out_vcc->stats->rx);
22285+ atomic_inc_unchecked(&vcc->stats->tx);
22286+ atomic_inc_unchecked(&out_vcc->stats->rx);
22287 done:
22288 if (vcc->pop) vcc->pop(vcc,skb);
22289 else dev_kfree_skb(skb);
22290diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22291--- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22292+++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22293@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22294 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22295 vcc->dev->number);
22296 length = 0;
22297- atomic_inc(&vcc->stats->rx_err);
22298+ atomic_inc_unchecked(&vcc->stats->rx_err);
22299 }
22300 else {
22301 length = ATM_CELL_SIZE-1; /* no HEC */
22302@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22303 size);
22304 }
22305 eff = length = 0;
22306- atomic_inc(&vcc->stats->rx_err);
22307+ atomic_inc_unchecked(&vcc->stats->rx_err);
22308 }
22309 else {
22310 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22311@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22312 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22313 vcc->dev->number,vcc->vci,length,size << 2,descr);
22314 length = eff = 0;
22315- atomic_inc(&vcc->stats->rx_err);
22316+ atomic_inc_unchecked(&vcc->stats->rx_err);
22317 }
22318 }
22319 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22320@@ -771,7 +771,7 @@ rx_dequeued++;
22321 vcc->push(vcc,skb);
22322 pushed++;
22323 }
22324- atomic_inc(&vcc->stats->rx);
22325+ atomic_inc_unchecked(&vcc->stats->rx);
22326 }
22327 wake_up(&eni_dev->rx_wait);
22328 }
22329@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22330 PCI_DMA_TODEVICE);
22331 if (vcc->pop) vcc->pop(vcc,skb);
22332 else dev_kfree_skb_irq(skb);
22333- atomic_inc(&vcc->stats->tx);
22334+ atomic_inc_unchecked(&vcc->stats->tx);
22335 wake_up(&eni_dev->tx_wait);
22336 dma_complete++;
22337 }
22338diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22339--- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22340+++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22341@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22342 }
22343 }
22344
22345- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22346+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22347
22348 fs_dprintk (FS_DEBUG_TXMEM, "i");
22349 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22350@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22351 #endif
22352 skb_put (skb, qe->p1 & 0xffff);
22353 ATM_SKB(skb)->vcc = atm_vcc;
22354- atomic_inc(&atm_vcc->stats->rx);
22355+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22356 __net_timestamp(skb);
22357 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22358 atm_vcc->push (atm_vcc, skb);
22359@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22360 kfree (pe);
22361 }
22362 if (atm_vcc)
22363- atomic_inc(&atm_vcc->stats->rx_drop);
22364+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22365 break;
22366 case 0x1f: /* Reassembly abort: no buffers. */
22367 /* Silently increment error counter. */
22368 if (atm_vcc)
22369- atomic_inc(&atm_vcc->stats->rx_drop);
22370+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22371 break;
22372 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22373 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22374diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22375--- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22376+++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22377@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22378 #endif
22379 /* check error condition */
22380 if (*entry->status & STATUS_ERROR)
22381- atomic_inc(&vcc->stats->tx_err);
22382+ atomic_inc_unchecked(&vcc->stats->tx_err);
22383 else
22384- atomic_inc(&vcc->stats->tx);
22385+ atomic_inc_unchecked(&vcc->stats->tx);
22386 }
22387 }
22388
22389@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22390 if (skb == NULL) {
22391 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22392
22393- atomic_inc(&vcc->stats->rx_drop);
22394+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22395 return -ENOMEM;
22396 }
22397
22398@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22399
22400 dev_kfree_skb_any(skb);
22401
22402- atomic_inc(&vcc->stats->rx_drop);
22403+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22404 return -ENOMEM;
22405 }
22406
22407 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22408
22409 vcc->push(vcc, skb);
22410- atomic_inc(&vcc->stats->rx);
22411+ atomic_inc_unchecked(&vcc->stats->rx);
22412
22413 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22414
22415@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22416 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22417 fore200e->atm_dev->number,
22418 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22419- atomic_inc(&vcc->stats->rx_err);
22420+ atomic_inc_unchecked(&vcc->stats->rx_err);
22421 }
22422 }
22423
22424@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22425 goto retry_here;
22426 }
22427
22428- atomic_inc(&vcc->stats->tx_err);
22429+ atomic_inc_unchecked(&vcc->stats->tx_err);
22430
22431 fore200e->tx_sat++;
22432 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22433diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22434--- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22435+++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22436@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22437
22438 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22439 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22440- atomic_inc(&vcc->stats->rx_drop);
22441+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22442 goto return_host_buffers;
22443 }
22444
22445@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22446 RBRQ_LEN_ERR(he_dev->rbrq_head)
22447 ? "LEN_ERR" : "",
22448 vcc->vpi, vcc->vci);
22449- atomic_inc(&vcc->stats->rx_err);
22450+ atomic_inc_unchecked(&vcc->stats->rx_err);
22451 goto return_host_buffers;
22452 }
22453
22454@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22455 vcc->push(vcc, skb);
22456 spin_lock(&he_dev->global_lock);
22457
22458- atomic_inc(&vcc->stats->rx);
22459+ atomic_inc_unchecked(&vcc->stats->rx);
22460
22461 return_host_buffers:
22462 ++pdus_assembled;
22463@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22464 tpd->vcc->pop(tpd->vcc, tpd->skb);
22465 else
22466 dev_kfree_skb_any(tpd->skb);
22467- atomic_inc(&tpd->vcc->stats->tx_err);
22468+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22469 }
22470 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22471 return;
22472@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22473 vcc->pop(vcc, skb);
22474 else
22475 dev_kfree_skb_any(skb);
22476- atomic_inc(&vcc->stats->tx_err);
22477+ atomic_inc_unchecked(&vcc->stats->tx_err);
22478 return -EINVAL;
22479 }
22480
22481@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22482 vcc->pop(vcc, skb);
22483 else
22484 dev_kfree_skb_any(skb);
22485- atomic_inc(&vcc->stats->tx_err);
22486+ atomic_inc_unchecked(&vcc->stats->tx_err);
22487 return -EINVAL;
22488 }
22489 #endif
22490@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22491 vcc->pop(vcc, skb);
22492 else
22493 dev_kfree_skb_any(skb);
22494- atomic_inc(&vcc->stats->tx_err);
22495+ atomic_inc_unchecked(&vcc->stats->tx_err);
22496 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22497 return -ENOMEM;
22498 }
22499@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22500 vcc->pop(vcc, skb);
22501 else
22502 dev_kfree_skb_any(skb);
22503- atomic_inc(&vcc->stats->tx_err);
22504+ atomic_inc_unchecked(&vcc->stats->tx_err);
22505 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22506 return -ENOMEM;
22507 }
22508@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22509 __enqueue_tpd(he_dev, tpd, cid);
22510 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22511
22512- atomic_inc(&vcc->stats->tx);
22513+ atomic_inc_unchecked(&vcc->stats->tx);
22514
22515 return 0;
22516 }
22517diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22518--- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22519+++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22520@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22521 {
22522 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22523 // VC layer stats
22524- atomic_inc(&vcc->stats->rx);
22525+ atomic_inc_unchecked(&vcc->stats->rx);
22526 __net_timestamp(skb);
22527 // end of our responsibility
22528 vcc->push (vcc, skb);
22529@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22530 dev->tx_iovec = NULL;
22531
22532 // VC layer stats
22533- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22534+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22535
22536 // free the skb
22537 hrz_kfree_skb (skb);
22538diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22539--- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22540+++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22541@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22542 else
22543 dev_kfree_skb(skb);
22544
22545- atomic_inc(&vcc->stats->tx);
22546+ atomic_inc_unchecked(&vcc->stats->tx);
22547 }
22548
22549 atomic_dec(&scq->used);
22550@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22551 if ((sb = dev_alloc_skb(64)) == NULL) {
22552 printk("%s: Can't allocate buffers for aal0.\n",
22553 card->name);
22554- atomic_add(i, &vcc->stats->rx_drop);
22555+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22556 break;
22557 }
22558 if (!atm_charge(vcc, sb->truesize)) {
22559 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22560 card->name);
22561- atomic_add(i - 1, &vcc->stats->rx_drop);
22562+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22563 dev_kfree_skb(sb);
22564 break;
22565 }
22566@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22567 ATM_SKB(sb)->vcc = vcc;
22568 __net_timestamp(sb);
22569 vcc->push(vcc, sb);
22570- atomic_inc(&vcc->stats->rx);
22571+ atomic_inc_unchecked(&vcc->stats->rx);
22572
22573 cell += ATM_CELL_PAYLOAD;
22574 }
22575@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22576 "(CDC: %08x)\n",
22577 card->name, len, rpp->len, readl(SAR_REG_CDC));
22578 recycle_rx_pool_skb(card, rpp);
22579- atomic_inc(&vcc->stats->rx_err);
22580+ atomic_inc_unchecked(&vcc->stats->rx_err);
22581 return;
22582 }
22583 if (stat & SAR_RSQE_CRC) {
22584 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22585 recycle_rx_pool_skb(card, rpp);
22586- atomic_inc(&vcc->stats->rx_err);
22587+ atomic_inc_unchecked(&vcc->stats->rx_err);
22588 return;
22589 }
22590 if (skb_queue_len(&rpp->queue) > 1) {
22591@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22592 RXPRINTK("%s: Can't alloc RX skb.\n",
22593 card->name);
22594 recycle_rx_pool_skb(card, rpp);
22595- atomic_inc(&vcc->stats->rx_err);
22596+ atomic_inc_unchecked(&vcc->stats->rx_err);
22597 return;
22598 }
22599 if (!atm_charge(vcc, skb->truesize)) {
22600@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22601 __net_timestamp(skb);
22602
22603 vcc->push(vcc, skb);
22604- atomic_inc(&vcc->stats->rx);
22605+ atomic_inc_unchecked(&vcc->stats->rx);
22606
22607 return;
22608 }
22609@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22610 __net_timestamp(skb);
22611
22612 vcc->push(vcc, skb);
22613- atomic_inc(&vcc->stats->rx);
22614+ atomic_inc_unchecked(&vcc->stats->rx);
22615
22616 if (skb->truesize > SAR_FB_SIZE_3)
22617 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22618@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22619 if (vcc->qos.aal != ATM_AAL0) {
22620 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22621 card->name, vpi, vci);
22622- atomic_inc(&vcc->stats->rx_drop);
22623+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22624 goto drop;
22625 }
22626
22627 if ((sb = dev_alloc_skb(64)) == NULL) {
22628 printk("%s: Can't allocate buffers for AAL0.\n",
22629 card->name);
22630- atomic_inc(&vcc->stats->rx_err);
22631+ atomic_inc_unchecked(&vcc->stats->rx_err);
22632 goto drop;
22633 }
22634
22635@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22636 ATM_SKB(sb)->vcc = vcc;
22637 __net_timestamp(sb);
22638 vcc->push(vcc, sb);
22639- atomic_inc(&vcc->stats->rx);
22640+ atomic_inc_unchecked(&vcc->stats->rx);
22641
22642 drop:
22643 skb_pull(queue, 64);
22644@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22645
22646 if (vc == NULL) {
22647 printk("%s: NULL connection in send().\n", card->name);
22648- atomic_inc(&vcc->stats->tx_err);
22649+ atomic_inc_unchecked(&vcc->stats->tx_err);
22650 dev_kfree_skb(skb);
22651 return -EINVAL;
22652 }
22653 if (!test_bit(VCF_TX, &vc->flags)) {
22654 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22655- atomic_inc(&vcc->stats->tx_err);
22656+ atomic_inc_unchecked(&vcc->stats->tx_err);
22657 dev_kfree_skb(skb);
22658 return -EINVAL;
22659 }
22660@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22661 break;
22662 default:
22663 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22664- atomic_inc(&vcc->stats->tx_err);
22665+ atomic_inc_unchecked(&vcc->stats->tx_err);
22666 dev_kfree_skb(skb);
22667 return -EINVAL;
22668 }
22669
22670 if (skb_shinfo(skb)->nr_frags != 0) {
22671 printk("%s: No scatter-gather yet.\n", card->name);
22672- atomic_inc(&vcc->stats->tx_err);
22673+ atomic_inc_unchecked(&vcc->stats->tx_err);
22674 dev_kfree_skb(skb);
22675 return -EINVAL;
22676 }
22677@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22678
22679 err = queue_skb(card, vc, skb, oam);
22680 if (err) {
22681- atomic_inc(&vcc->stats->tx_err);
22682+ atomic_inc_unchecked(&vcc->stats->tx_err);
22683 dev_kfree_skb(skb);
22684 return err;
22685 }
22686@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22687 skb = dev_alloc_skb(64);
22688 if (!skb) {
22689 printk("%s: Out of memory in send_oam().\n", card->name);
22690- atomic_inc(&vcc->stats->tx_err);
22691+ atomic_inc_unchecked(&vcc->stats->tx_err);
22692 return -ENOMEM;
22693 }
22694 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22695diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22696--- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22697+++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22698@@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22699 status = (u_short) (buf_desc_ptr->desc_mode);
22700 if (status & (RX_CER | RX_PTE | RX_OFL))
22701 {
22702- atomic_inc(&vcc->stats->rx_err);
22703+ atomic_inc_unchecked(&vcc->stats->rx_err);
22704 IF_ERR(printk("IA: bad packet, dropping it");)
22705 if (status & RX_CER) {
22706 IF_ERR(printk(" cause: packet CRC error\n");)
22707@@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22708 len = dma_addr - buf_addr;
22709 if (len > iadev->rx_buf_sz) {
22710 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22711- atomic_inc(&vcc->stats->rx_err);
22712+ atomic_inc_unchecked(&vcc->stats->rx_err);
22713 goto out_free_desc;
22714 }
22715
22716@@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22717 ia_vcc = INPH_IA_VCC(vcc);
22718 if (ia_vcc == NULL)
22719 {
22720- atomic_inc(&vcc->stats->rx_err);
22721+ atomic_inc_unchecked(&vcc->stats->rx_err);
22722 dev_kfree_skb_any(skb);
22723 atm_return(vcc, atm_guess_pdu2truesize(len));
22724 goto INCR_DLE;
22725@@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22726 if ((length > iadev->rx_buf_sz) || (length >
22727 (skb->len - sizeof(struct cpcs_trailer))))
22728 {
22729- atomic_inc(&vcc->stats->rx_err);
22730+ atomic_inc_unchecked(&vcc->stats->rx_err);
22731 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22732 length, skb->len);)
22733 dev_kfree_skb_any(skb);
22734@@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22735
22736 IF_RX(printk("rx_dle_intr: skb push");)
22737 vcc->push(vcc,skb);
22738- atomic_inc(&vcc->stats->rx);
22739+ atomic_inc_unchecked(&vcc->stats->rx);
22740 iadev->rx_pkt_cnt++;
22741 }
22742 INCR_DLE:
22743@@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22744 {
22745 struct k_sonet_stats *stats;
22746 stats = &PRIV(_ia_dev[board])->sonet_stats;
22747- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22748- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22749- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22750- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22751- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22752- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22753- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22754- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22755- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22756+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22757+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22758+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22759+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22760+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22761+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22762+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22763+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22764+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22765 }
22766 ia_cmds.status = 0;
22767 break;
22768@@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22769 if ((desc == 0) || (desc > iadev->num_tx_desc))
22770 {
22771 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22772- atomic_inc(&vcc->stats->tx);
22773+ atomic_inc_unchecked(&vcc->stats->tx);
22774 if (vcc->pop)
22775 vcc->pop(vcc, skb);
22776 else
22777@@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22778 ATM_DESC(skb) = vcc->vci;
22779 skb_queue_tail(&iadev->tx_dma_q, skb);
22780
22781- atomic_inc(&vcc->stats->tx);
22782+ atomic_inc_unchecked(&vcc->stats->tx);
22783 iadev->tx_pkt_cnt++;
22784 /* Increment transaction counter */
22785 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22786
22787 #if 0
22788 /* add flow control logic */
22789- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22790+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22791 if (iavcc->vc_desc_cnt > 10) {
22792 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22793 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22794diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22795--- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22796+++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22797@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22798 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22799 lanai_endtx(lanai, lvcc);
22800 lanai_free_skb(lvcc->tx.atmvcc, skb);
22801- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22802+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22803 }
22804
22805 /* Try to fill the buffer - don't call unless there is backlog */
22806@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22807 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22808 __net_timestamp(skb);
22809 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22810- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22811+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22812 out:
22813 lvcc->rx.buf.ptr = end;
22814 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22815@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22816 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22817 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22818 lanai->stats.service_rxnotaal5++;
22819- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22820+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22821 return 0;
22822 }
22823 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22824@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22825 int bytes;
22826 read_unlock(&vcc_sklist_lock);
22827 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22828- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22829+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22830 lvcc->stats.x.aal5.service_trash++;
22831 bytes = (SERVICE_GET_END(s) * 16) -
22832 (((unsigned long) lvcc->rx.buf.ptr) -
22833@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22834 }
22835 if (s & SERVICE_STREAM) {
22836 read_unlock(&vcc_sklist_lock);
22837- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22838+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22839 lvcc->stats.x.aal5.service_stream++;
22840 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22841 "PDU on VCI %d!\n", lanai->number, vci);
22842@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22843 return 0;
22844 }
22845 DPRINTK("got rx crc error on vci %d\n", vci);
22846- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22847+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22848 lvcc->stats.x.aal5.service_rxcrc++;
22849 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22850 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22851diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22852--- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22853+++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22854@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22855 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22856 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22857 card->index);
22858- atomic_inc(&vcc->stats->tx_err);
22859+ atomic_inc_unchecked(&vcc->stats->tx_err);
22860 dev_kfree_skb_any(skb);
22861 return -EINVAL;
22862 }
22863@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22864 if (!vc->tx) {
22865 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22866 card->index);
22867- atomic_inc(&vcc->stats->tx_err);
22868+ atomic_inc_unchecked(&vcc->stats->tx_err);
22869 dev_kfree_skb_any(skb);
22870 return -EINVAL;
22871 }
22872@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22873 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22874 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22875 card->index);
22876- atomic_inc(&vcc->stats->tx_err);
22877+ atomic_inc_unchecked(&vcc->stats->tx_err);
22878 dev_kfree_skb_any(skb);
22879 return -EINVAL;
22880 }
22881
22882 if (skb_shinfo(skb)->nr_frags != 0) {
22883 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22884- atomic_inc(&vcc->stats->tx_err);
22885+ atomic_inc_unchecked(&vcc->stats->tx_err);
22886 dev_kfree_skb_any(skb);
22887 return -EINVAL;
22888 }
22889@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22890 }
22891
22892 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22893- atomic_inc(&vcc->stats->tx_err);
22894+ atomic_inc_unchecked(&vcc->stats->tx_err);
22895 dev_kfree_skb_any(skb);
22896 return -EIO;
22897 }
22898- atomic_inc(&vcc->stats->tx);
22899+ atomic_inc_unchecked(&vcc->stats->tx);
22900
22901 return 0;
22902 }
22903@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22904 printk
22905 ("nicstar%d: Can't allocate buffers for aal0.\n",
22906 card->index);
22907- atomic_add(i, &vcc->stats->rx_drop);
22908+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22909 break;
22910 }
22911 if (!atm_charge(vcc, sb->truesize)) {
22912 RXPRINTK
22913 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22914 card->index);
22915- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22916+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22917 dev_kfree_skb_any(sb);
22918 break;
22919 }
22920@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22921 ATM_SKB(sb)->vcc = vcc;
22922 __net_timestamp(sb);
22923 vcc->push(vcc, sb);
22924- atomic_inc(&vcc->stats->rx);
22925+ atomic_inc_unchecked(&vcc->stats->rx);
22926 cell += ATM_CELL_PAYLOAD;
22927 }
22928
22929@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22930 if (iovb == NULL) {
22931 printk("nicstar%d: Out of iovec buffers.\n",
22932 card->index);
22933- atomic_inc(&vcc->stats->rx_drop);
22934+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22935 recycle_rx_buf(card, skb);
22936 return;
22937 }
22938@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22939 small or large buffer itself. */
22940 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22941 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22942- atomic_inc(&vcc->stats->rx_err);
22943+ atomic_inc_unchecked(&vcc->stats->rx_err);
22944 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22945 NS_MAX_IOVECS);
22946 NS_PRV_IOVCNT(iovb) = 0;
22947@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22948 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22949 card->index);
22950 which_list(card, skb);
22951- atomic_inc(&vcc->stats->rx_err);
22952+ atomic_inc_unchecked(&vcc->stats->rx_err);
22953 recycle_rx_buf(card, skb);
22954 vc->rx_iov = NULL;
22955 recycle_iov_buf(card, iovb);
22956@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22957 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22958 card->index);
22959 which_list(card, skb);
22960- atomic_inc(&vcc->stats->rx_err);
22961+ atomic_inc_unchecked(&vcc->stats->rx_err);
22962 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22963 NS_PRV_IOVCNT(iovb));
22964 vc->rx_iov = NULL;
22965@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22966 printk(" - PDU size mismatch.\n");
22967 else
22968 printk(".\n");
22969- atomic_inc(&vcc->stats->rx_err);
22970+ atomic_inc_unchecked(&vcc->stats->rx_err);
22971 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22972 NS_PRV_IOVCNT(iovb));
22973 vc->rx_iov = NULL;
22974@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22975 /* skb points to a small buffer */
22976 if (!atm_charge(vcc, skb->truesize)) {
22977 push_rxbufs(card, skb);
22978- atomic_inc(&vcc->stats->rx_drop);
22979+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22980 } else {
22981 skb_put(skb, len);
22982 dequeue_sm_buf(card, skb);
22983@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22984 ATM_SKB(skb)->vcc = vcc;
22985 __net_timestamp(skb);
22986 vcc->push(vcc, skb);
22987- atomic_inc(&vcc->stats->rx);
22988+ atomic_inc_unchecked(&vcc->stats->rx);
22989 }
22990 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22991 struct sk_buff *sb;
22992@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22993 if (len <= NS_SMBUFSIZE) {
22994 if (!atm_charge(vcc, sb->truesize)) {
22995 push_rxbufs(card, sb);
22996- atomic_inc(&vcc->stats->rx_drop);
22997+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22998 } else {
22999 skb_put(sb, len);
23000 dequeue_sm_buf(card, sb);
23001@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23002 ATM_SKB(sb)->vcc = vcc;
23003 __net_timestamp(sb);
23004 vcc->push(vcc, sb);
23005- atomic_inc(&vcc->stats->rx);
23006+ atomic_inc_unchecked(&vcc->stats->rx);
23007 }
23008
23009 push_rxbufs(card, skb);
23010@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23011
23012 if (!atm_charge(vcc, skb->truesize)) {
23013 push_rxbufs(card, skb);
23014- atomic_inc(&vcc->stats->rx_drop);
23015+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23016 } else {
23017 dequeue_lg_buf(card, skb);
23018 #ifdef NS_USE_DESTRUCTORS
23019@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23020 ATM_SKB(skb)->vcc = vcc;
23021 __net_timestamp(skb);
23022 vcc->push(vcc, skb);
23023- atomic_inc(&vcc->stats->rx);
23024+ atomic_inc_unchecked(&vcc->stats->rx);
23025 }
23026
23027 push_rxbufs(card, sb);
23028@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23029 printk
23030 ("nicstar%d: Out of huge buffers.\n",
23031 card->index);
23032- atomic_inc(&vcc->stats->rx_drop);
23033+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23034 recycle_iovec_rx_bufs(card,
23035 (struct iovec *)
23036 iovb->data,
23037@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23038 card->hbpool.count++;
23039 } else
23040 dev_kfree_skb_any(hb);
23041- atomic_inc(&vcc->stats->rx_drop);
23042+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23043 } else {
23044 /* Copy the small buffer to the huge buffer */
23045 sb = (struct sk_buff *)iov->iov_base;
23046@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23047 #endif /* NS_USE_DESTRUCTORS */
23048 __net_timestamp(hb);
23049 vcc->push(vcc, hb);
23050- atomic_inc(&vcc->stats->rx);
23051+ atomic_inc_unchecked(&vcc->stats->rx);
23052 }
23053 }
23054
23055diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23056--- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23057+++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23058@@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23059 }
23060 atm_charge(vcc, skb->truesize);
23061 vcc->push(vcc, skb);
23062- atomic_inc(&vcc->stats->rx);
23063+ atomic_inc_unchecked(&vcc->stats->rx);
23064 break;
23065
23066 case PKT_STATUS:
23067@@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23068 char msg[500];
23069 char item[10];
23070
23071+ pax_track_stack();
23072+
23073 len = buf->len;
23074 for (i = 0; i < len; i++){
23075 if(i % 8 == 0)
23076@@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23077 vcc = SKB_CB(oldskb)->vcc;
23078
23079 if (vcc) {
23080- atomic_inc(&vcc->stats->tx);
23081+ atomic_inc_unchecked(&vcc->stats->tx);
23082 solos_pop(vcc, oldskb);
23083 } else
23084 dev_kfree_skb_irq(oldskb);
23085diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23086--- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23087+++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23088@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23089
23090
23091 #define ADD_LIMITED(s,v) \
23092- atomic_add((v),&stats->s); \
23093- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23094+ atomic_add_unchecked((v),&stats->s); \
23095+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23096
23097
23098 static void suni_hz(unsigned long from_timer)
23099diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23100--- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23101+++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23102@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23103 struct sonet_stats tmp;
23104 int error = 0;
23105
23106- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23107+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23108 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23109 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23110 if (zero && !error) {
23111@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23112
23113
23114 #define ADD_LIMITED(s,v) \
23115- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23116- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23117- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23118+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23119+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23120+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23121
23122
23123 static void stat_event(struct atm_dev *dev)
23124@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23125 if (reason & uPD98402_INT_PFM) stat_event(dev);
23126 if (reason & uPD98402_INT_PCO) {
23127 (void) GET(PCOCR); /* clear interrupt cause */
23128- atomic_add(GET(HECCT),
23129+ atomic_add_unchecked(GET(HECCT),
23130 &PRIV(dev)->sonet_stats.uncorr_hcs);
23131 }
23132 if ((reason & uPD98402_INT_RFO) &&
23133@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23134 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23135 uPD98402_INT_LOS),PIMR); /* enable them */
23136 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23137- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23138- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23139- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23140+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23141+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23142+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23143 return 0;
23144 }
23145
23146diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23147--- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23148+++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23149@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23150 }
23151 if (!size) {
23152 dev_kfree_skb_irq(skb);
23153- if (vcc) atomic_inc(&vcc->stats->rx_err);
23154+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23155 continue;
23156 }
23157 if (!atm_charge(vcc,skb->truesize)) {
23158@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23159 skb->len = size;
23160 ATM_SKB(skb)->vcc = vcc;
23161 vcc->push(vcc,skb);
23162- atomic_inc(&vcc->stats->rx);
23163+ atomic_inc_unchecked(&vcc->stats->rx);
23164 }
23165 zout(pos & 0xffff,MTA(mbx));
23166 #if 0 /* probably a stupid idea */
23167@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23168 skb_queue_head(&zatm_vcc->backlog,skb);
23169 break;
23170 }
23171- atomic_inc(&vcc->stats->tx);
23172+ atomic_inc_unchecked(&vcc->stats->tx);
23173 wake_up(&zatm_vcc->tx_wait);
23174 }
23175
23176diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23177--- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23178+++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23179@@ -29,14 +29,14 @@ bool events_check_enabled;
23180 * They need to be modified together atomically, so it's better to use one
23181 * atomic variable to hold them both.
23182 */
23183-static atomic_t combined_event_count = ATOMIC_INIT(0);
23184+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23185
23186 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23187 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23188
23189 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23190 {
23191- unsigned int comb = atomic_read(&combined_event_count);
23192+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23193
23194 *cnt = (comb >> IN_PROGRESS_BITS);
23195 *inpr = comb & MAX_IN_PROGRESS;
23196@@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23197 ws->last_time = ktime_get();
23198
23199 /* Increment the counter of events in progress. */
23200- atomic_inc(&combined_event_count);
23201+ atomic_inc_unchecked(&combined_event_count);
23202 }
23203
23204 /**
23205@@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23206 * Increment the counter of registered wakeup events and decrement the
23207 * couter of wakeup events in progress simultaneously.
23208 */
23209- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23210+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23211 }
23212
23213 /**
23214diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23215--- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23216+++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23217@@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23218 int err;
23219 u32 cp;
23220
23221+ memset(&arg64, 0, sizeof(arg64));
23222+
23223 err = 0;
23224 err |=
23225 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23226@@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23227 while (!list_empty(&h->reqQ)) {
23228 c = list_entry(h->reqQ.next, CommandList_struct, list);
23229 /* can't do anything if fifo is full */
23230- if ((h->access.fifo_full(h))) {
23231+ if ((h->access->fifo_full(h))) {
23232 dev_warn(&h->pdev->dev, "fifo full\n");
23233 break;
23234 }
23235@@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23236 h->Qdepth--;
23237
23238 /* Tell the controller execute command */
23239- h->access.submit_command(h, c);
23240+ h->access->submit_command(h, c);
23241
23242 /* Put job onto the completed Q */
23243 addQ(&h->cmpQ, c);
23244@@ -3369,17 +3371,17 @@ startio:
23245
23246 static inline unsigned long get_next_completion(ctlr_info_t *h)
23247 {
23248- return h->access.command_completed(h);
23249+ return h->access->command_completed(h);
23250 }
23251
23252 static inline int interrupt_pending(ctlr_info_t *h)
23253 {
23254- return h->access.intr_pending(h);
23255+ return h->access->intr_pending(h);
23256 }
23257
23258 static inline long interrupt_not_for_us(ctlr_info_t *h)
23259 {
23260- return ((h->access.intr_pending(h) == 0) ||
23261+ return ((h->access->intr_pending(h) == 0) ||
23262 (h->interrupts_enabled == 0));
23263 }
23264
23265@@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23266 u32 a;
23267
23268 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23269- return h->access.command_completed(h);
23270+ return h->access->command_completed(h);
23271
23272 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23273 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23274@@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23275 trans_support & CFGTBL_Trans_use_short_tags);
23276
23277 /* Change the access methods to the performant access methods */
23278- h->access = SA5_performant_access;
23279+ h->access = &SA5_performant_access;
23280 h->transMethod = CFGTBL_Trans_Performant;
23281
23282 return;
23283@@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23284 if (prod_index < 0)
23285 return -ENODEV;
23286 h->product_name = products[prod_index].product_name;
23287- h->access = *(products[prod_index].access);
23288+ h->access = products[prod_index].access;
23289
23290 if (cciss_board_disabled(h)) {
23291 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23292@@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23293 }
23294
23295 /* make sure the board interrupts are off */
23296- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23297+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23298 if (h->msi_vector || h->msix_vector) {
23299 if (request_irq(h->intr[PERF_MODE_INT],
23300 do_cciss_msix_intr,
23301@@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23302 cciss_scsi_setup(h);
23303
23304 /* Turn the interrupts on so we can service requests */
23305- h->access.set_intr_mask(h, CCISS_INTR_ON);
23306+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23307
23308 /* Get the firmware version */
23309 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23310@@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23311 kfree(flush_buf);
23312 if (return_code != IO_OK)
23313 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23314- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23315+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23316 free_irq(h->intr[PERF_MODE_INT], h);
23317 }
23318
23319diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23320--- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23321+++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23322@@ -100,7 +100,7 @@ struct ctlr_info
23323 /* information about each logical volume */
23324 drive_info_struct *drv[CISS_MAX_LUN];
23325
23326- struct access_method access;
23327+ struct access_method *access;
23328
23329 /* queue and queue Info */
23330 struct list_head reqQ;
23331diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23332--- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23333+++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23334@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23335 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23336 goto Enomem4;
23337 }
23338- hba[i]->access.set_intr_mask(hba[i], 0);
23339+ hba[i]->access->set_intr_mask(hba[i], 0);
23340 if (request_irq(hba[i]->intr, do_ida_intr,
23341 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23342 {
23343@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23344 add_timer(&hba[i]->timer);
23345
23346 /* Enable IRQ now that spinlock and rate limit timer are set up */
23347- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23348+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23349
23350 for(j=0; j<NWD; j++) {
23351 struct gendisk *disk = ida_gendisk[i][j];
23352@@ -694,7 +694,7 @@ DBGINFO(
23353 for(i=0; i<NR_PRODUCTS; i++) {
23354 if (board_id == products[i].board_id) {
23355 c->product_name = products[i].product_name;
23356- c->access = *(products[i].access);
23357+ c->access = products[i].access;
23358 break;
23359 }
23360 }
23361@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23362 hba[ctlr]->intr = intr;
23363 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23364 hba[ctlr]->product_name = products[j].product_name;
23365- hba[ctlr]->access = *(products[j].access);
23366+ hba[ctlr]->access = products[j].access;
23367 hba[ctlr]->ctlr = ctlr;
23368 hba[ctlr]->board_id = board_id;
23369 hba[ctlr]->pci_dev = NULL; /* not PCI */
23370@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23371 struct scatterlist tmp_sg[SG_MAX];
23372 int i, dir, seg;
23373
23374+ pax_track_stack();
23375+
23376 queue_next:
23377 creq = blk_peek_request(q);
23378 if (!creq)
23379@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23380
23381 while((c = h->reqQ) != NULL) {
23382 /* Can't do anything if we're busy */
23383- if (h->access.fifo_full(h) == 0)
23384+ if (h->access->fifo_full(h) == 0)
23385 return;
23386
23387 /* Get the first entry from the request Q */
23388@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23389 h->Qdepth--;
23390
23391 /* Tell the controller to do our bidding */
23392- h->access.submit_command(h, c);
23393+ h->access->submit_command(h, c);
23394
23395 /* Get onto the completion Q */
23396 addQ(&h->cmpQ, c);
23397@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23398 unsigned long flags;
23399 __u32 a,a1;
23400
23401- istat = h->access.intr_pending(h);
23402+ istat = h->access->intr_pending(h);
23403 /* Is this interrupt for us? */
23404 if (istat == 0)
23405 return IRQ_NONE;
23406@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23407 */
23408 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23409 if (istat & FIFO_NOT_EMPTY) {
23410- while((a = h->access.command_completed(h))) {
23411+ while((a = h->access->command_completed(h))) {
23412 a1 = a; a &= ~3;
23413 if ((c = h->cmpQ) == NULL)
23414 {
23415@@ -1449,11 +1451,11 @@ static int sendcmd(
23416 /*
23417 * Disable interrupt
23418 */
23419- info_p->access.set_intr_mask(info_p, 0);
23420+ info_p->access->set_intr_mask(info_p, 0);
23421 /* Make sure there is room in the command FIFO */
23422 /* Actually it should be completely empty at this time. */
23423 for (i = 200000; i > 0; i--) {
23424- temp = info_p->access.fifo_full(info_p);
23425+ temp = info_p->access->fifo_full(info_p);
23426 if (temp != 0) {
23427 break;
23428 }
23429@@ -1466,7 +1468,7 @@ DBG(
23430 /*
23431 * Send the cmd
23432 */
23433- info_p->access.submit_command(info_p, c);
23434+ info_p->access->submit_command(info_p, c);
23435 complete = pollcomplete(ctlr);
23436
23437 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23438@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23439 * we check the new geometry. Then turn interrupts back on when
23440 * we're done.
23441 */
23442- host->access.set_intr_mask(host, 0);
23443+ host->access->set_intr_mask(host, 0);
23444 getgeometry(ctlr);
23445- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23446+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23447
23448 for(i=0; i<NWD; i++) {
23449 struct gendisk *disk = ida_gendisk[ctlr][i];
23450@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23451 /* Wait (up to 2 seconds) for a command to complete */
23452
23453 for (i = 200000; i > 0; i--) {
23454- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23455+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23456 if (done == 0) {
23457 udelay(10); /* a short fixed delay */
23458 } else
23459diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23460--- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23461+++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23462@@ -99,7 +99,7 @@ struct ctlr_info {
23463 drv_info_t drv[NWD];
23464 struct proc_dir_entry *proc;
23465
23466- struct access_method access;
23467+ struct access_method *access;
23468
23469 cmdlist_t *reqQ;
23470 cmdlist_t *cmpQ;
23471diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23472--- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23473+++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23474@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23475 unsigned long flags;
23476 int Channel, TargetID;
23477
23478+ pax_track_stack();
23479+
23480 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23481 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23482 sizeof(DAC960_SCSI_Inquiry_T) +
23483diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23484--- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23485+++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23486@@ -736,7 +736,7 @@ struct drbd_request;
23487 struct drbd_epoch {
23488 struct list_head list;
23489 unsigned int barrier_nr;
23490- atomic_t epoch_size; /* increased on every request added. */
23491+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23492 atomic_t active; /* increased on every req. added, and dec on every finished. */
23493 unsigned long flags;
23494 };
23495@@ -1108,7 +1108,7 @@ struct drbd_conf {
23496 void *int_dig_in;
23497 void *int_dig_vv;
23498 wait_queue_head_t seq_wait;
23499- atomic_t packet_seq;
23500+ atomic_unchecked_t packet_seq;
23501 unsigned int peer_seq;
23502 spinlock_t peer_seq_lock;
23503 unsigned int minor;
23504diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23505--- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23506+++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23507@@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23508 p.sector = sector;
23509 p.block_id = block_id;
23510 p.blksize = blksize;
23511- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23512+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23513
23514 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23515 return false;
23516@@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23517 p.sector = cpu_to_be64(req->sector);
23518 p.block_id = (unsigned long)req;
23519 p.seq_num = cpu_to_be32(req->seq_num =
23520- atomic_add_return(1, &mdev->packet_seq));
23521+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23522
23523 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23524
23525@@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23526 atomic_set(&mdev->unacked_cnt, 0);
23527 atomic_set(&mdev->local_cnt, 0);
23528 atomic_set(&mdev->net_cnt, 0);
23529- atomic_set(&mdev->packet_seq, 0);
23530+ atomic_set_unchecked(&mdev->packet_seq, 0);
23531 atomic_set(&mdev->pp_in_use, 0);
23532 atomic_set(&mdev->pp_in_use_by_net, 0);
23533 atomic_set(&mdev->rs_sect_in, 0);
23534@@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23535 mdev->receiver.t_state);
23536
23537 /* no need to lock it, I'm the only thread alive */
23538- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23539- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23540+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23541+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23542 mdev->al_writ_cnt =
23543 mdev->bm_writ_cnt =
23544 mdev->read_cnt =
23545diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23546--- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23547+++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23548@@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23549 module_put(THIS_MODULE);
23550 }
23551
23552-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23553+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23554
23555 static unsigned short *
23556 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23557@@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23558 cn_reply->id.idx = CN_IDX_DRBD;
23559 cn_reply->id.val = CN_VAL_DRBD;
23560
23561- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23562+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23563 cn_reply->ack = 0; /* not used here. */
23564 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23565 (int)((char *)tl - (char *)reply->tag_list);
23566@@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23567 cn_reply->id.idx = CN_IDX_DRBD;
23568 cn_reply->id.val = CN_VAL_DRBD;
23569
23570- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23571+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23572 cn_reply->ack = 0; /* not used here. */
23573 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23574 (int)((char *)tl - (char *)reply->tag_list);
23575@@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23576 cn_reply->id.idx = CN_IDX_DRBD;
23577 cn_reply->id.val = CN_VAL_DRBD;
23578
23579- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23580+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23581 cn_reply->ack = 0; // not used here.
23582 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23583 (int)((char*)tl - (char*)reply->tag_list);
23584@@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23585 cn_reply->id.idx = CN_IDX_DRBD;
23586 cn_reply->id.val = CN_VAL_DRBD;
23587
23588- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23589+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23590 cn_reply->ack = 0; /* not used here. */
23591 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23592 (int)((char *)tl - (char *)reply->tag_list);
23593diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23594--- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23595+++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23596@@ -894,7 +894,7 @@ retry:
23597 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23598 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23599
23600- atomic_set(&mdev->packet_seq, 0);
23601+ atomic_set_unchecked(&mdev->packet_seq, 0);
23602 mdev->peer_seq = 0;
23603
23604 drbd_thread_start(&mdev->asender);
23605@@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23606 do {
23607 next_epoch = NULL;
23608
23609- epoch_size = atomic_read(&epoch->epoch_size);
23610+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23611
23612 switch (ev & ~EV_CLEANUP) {
23613 case EV_PUT:
23614@@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23615 rv = FE_DESTROYED;
23616 } else {
23617 epoch->flags = 0;
23618- atomic_set(&epoch->epoch_size, 0);
23619+ atomic_set_unchecked(&epoch->epoch_size, 0);
23620 /* atomic_set(&epoch->active, 0); is already zero */
23621 if (rv == FE_STILL_LIVE)
23622 rv = FE_RECYCLED;
23623@@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23624 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23625 drbd_flush(mdev);
23626
23627- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23628+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23629 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23630 if (epoch)
23631 break;
23632 }
23633
23634 epoch = mdev->current_epoch;
23635- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23636+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23637
23638 D_ASSERT(atomic_read(&epoch->active) == 0);
23639 D_ASSERT(epoch->flags == 0);
23640@@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23641 }
23642
23643 epoch->flags = 0;
23644- atomic_set(&epoch->epoch_size, 0);
23645+ atomic_set_unchecked(&epoch->epoch_size, 0);
23646 atomic_set(&epoch->active, 0);
23647
23648 spin_lock(&mdev->epoch_lock);
23649- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23650+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23651 list_add(&epoch->list, &mdev->current_epoch->list);
23652 mdev->current_epoch = epoch;
23653 mdev->epochs++;
23654@@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23655 spin_unlock(&mdev->peer_seq_lock);
23656
23657 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23658- atomic_inc(&mdev->current_epoch->epoch_size);
23659+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23660 return drbd_drain_block(mdev, data_size);
23661 }
23662
23663@@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23664
23665 spin_lock(&mdev->epoch_lock);
23666 e->epoch = mdev->current_epoch;
23667- atomic_inc(&e->epoch->epoch_size);
23668+ atomic_inc_unchecked(&e->epoch->epoch_size);
23669 atomic_inc(&e->epoch->active);
23670 spin_unlock(&mdev->epoch_lock);
23671
23672@@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23673 D_ASSERT(list_empty(&mdev->done_ee));
23674
23675 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23676- atomic_set(&mdev->current_epoch->epoch_size, 0);
23677+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23678 D_ASSERT(list_empty(&mdev->current_epoch->list));
23679 }
23680
23681diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23682--- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23683+++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23684@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23685 struct kvec iov;
23686 sigset_t blocked, oldset;
23687
23688+ pax_track_stack();
23689+
23690 if (unlikely(!sock)) {
23691 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23692 lo->disk->disk_name, (send ? "send" : "recv"));
23693@@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23694 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23695 unsigned int cmd, unsigned long arg)
23696 {
23697+ pax_track_stack();
23698+
23699 switch (cmd) {
23700 case NBD_DISCONNECT: {
23701 struct request sreq;
23702diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23703--- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23704+++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23705@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23706 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23707 return -EFAULT;
23708
23709- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23710+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23711 return -EFAULT;
23712
23713 client = agp_find_client_by_pid(reserve.pid);
23714diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23715--- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23716+++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23717@@ -9,6 +9,7 @@
23718 #include <linux/types.h>
23719 #include <linux/errno.h>
23720 #include <linux/tty.h>
23721+#include <linux/mutex.h>
23722 #include <linux/timer.h>
23723 #include <linux/kernel.h>
23724 #include <linux/wait.h>
23725@@ -34,6 +35,7 @@ static int vfd_is_open;
23726 static unsigned char vfd[40];
23727 static int vfd_cursor;
23728 static unsigned char ledpb, led;
23729+static DEFINE_MUTEX(vfd_mutex);
23730
23731 static void update_vfd(void)
23732 {
23733@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23734 if (!vfd_is_open)
23735 return -EBUSY;
23736
23737+ mutex_lock(&vfd_mutex);
23738 for (;;) {
23739 char c;
23740 if (!indx)
23741 break;
23742- if (get_user(c, buf))
23743+ if (get_user(c, buf)) {
23744+ mutex_unlock(&vfd_mutex);
23745 return -EFAULT;
23746+ }
23747 if (esc) {
23748 set_led(c);
23749 esc = 0;
23750@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23751 buf++;
23752 }
23753 update_vfd();
23754+ mutex_unlock(&vfd_mutex);
23755
23756 return len;
23757 }
23758diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23759--- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23760+++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23761@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23762 switch (cmd) {
23763
23764 case RTC_PLL_GET:
23765+ memset(&pll, 0, sizeof(pll));
23766 if (get_rtc_pll(&pll))
23767 return -EINVAL;
23768 else
23769diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23770--- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23771+++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23772@@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23773 }
23774
23775 static int
23776-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23777+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23778 struct hpet_info *info)
23779 {
23780 struct hpet_timer __iomem *timer;
23781diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23782--- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23783+++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23784@@ -414,7 +414,7 @@ struct ipmi_smi {
23785 struct proc_dir_entry *proc_dir;
23786 char proc_dir_name[10];
23787
23788- atomic_t stats[IPMI_NUM_STATS];
23789+ atomic_unchecked_t stats[IPMI_NUM_STATS];
23790
23791 /*
23792 * run_to_completion duplicate of smb_info, smi_info
23793@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23794
23795
23796 #define ipmi_inc_stat(intf, stat) \
23797- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23798+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23799 #define ipmi_get_stat(intf, stat) \
23800- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23801+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23802
23803 static int is_lan_addr(struct ipmi_addr *addr)
23804 {
23805@@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23806 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23807 init_waitqueue_head(&intf->waitq);
23808 for (i = 0; i < IPMI_NUM_STATS; i++)
23809- atomic_set(&intf->stats[i], 0);
23810+ atomic_set_unchecked(&intf->stats[i], 0);
23811
23812 intf->proc_dir = NULL;
23813
23814@@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23815 struct ipmi_smi_msg smi_msg;
23816 struct ipmi_recv_msg recv_msg;
23817
23818+ pax_track_stack();
23819+
23820 si = (struct ipmi_system_interface_addr *) &addr;
23821 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23822 si->channel = IPMI_BMC_CHANNEL;
23823diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23824--- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23825+++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23826@@ -276,7 +276,7 @@ struct smi_info {
23827 unsigned char slave_addr;
23828
23829 /* Counters and things for the proc filesystem. */
23830- atomic_t stats[SI_NUM_STATS];
23831+ atomic_unchecked_t stats[SI_NUM_STATS];
23832
23833 struct task_struct *thread;
23834
23835@@ -285,9 +285,9 @@ struct smi_info {
23836 };
23837
23838 #define smi_inc_stat(smi, stat) \
23839- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23840+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23841 #define smi_get_stat(smi, stat) \
23842- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23843+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23844
23845 #define SI_MAX_PARMS 4
23846
23847@@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23848 atomic_set(&new_smi->req_events, 0);
23849 new_smi->run_to_completion = 0;
23850 for (i = 0; i < SI_NUM_STATS; i++)
23851- atomic_set(&new_smi->stats[i], 0);
23852+ atomic_set_unchecked(&new_smi->stats[i], 0);
23853
23854 new_smi->interrupt_disabled = 1;
23855 atomic_set(&new_smi->stop_operation, 0);
23856diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23857--- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23858+++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23859@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23860
23861 config DEVKMEM
23862 bool "/dev/kmem virtual device support"
23863- default y
23864+ default n
23865+ depends on !GRKERNSEC_KMEM
23866 help
23867 Say Y here if you want to support the /dev/kmem device. The
23868 /dev/kmem device is rarely used, but can be used for certain
23869@@ -596,6 +597,7 @@ config DEVPORT
23870 bool
23871 depends on !M68K
23872 depends on ISA || PCI
23873+ depends on !GRKERNSEC_KMEM
23874 default y
23875
23876 source "drivers/s390/char/Kconfig"
23877diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23878--- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23879+++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23880@@ -18,6 +18,7 @@
23881 #include <linux/raw.h>
23882 #include <linux/tty.h>
23883 #include <linux/capability.h>
23884+#include <linux/security.h>
23885 #include <linux/ptrace.h>
23886 #include <linux/device.h>
23887 #include <linux/highmem.h>
23888@@ -34,6 +35,10 @@
23889 # include <linux/efi.h>
23890 #endif
23891
23892+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23893+extern struct file_operations grsec_fops;
23894+#endif
23895+
23896 static inline unsigned long size_inside_page(unsigned long start,
23897 unsigned long size)
23898 {
23899@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23900
23901 while (cursor < to) {
23902 if (!devmem_is_allowed(pfn)) {
23903+#ifdef CONFIG_GRKERNSEC_KMEM
23904+ gr_handle_mem_readwrite(from, to);
23905+#else
23906 printk(KERN_INFO
23907 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23908 current->comm, from, to);
23909+#endif
23910 return 0;
23911 }
23912 cursor += PAGE_SIZE;
23913@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23914 }
23915 return 1;
23916 }
23917+#elif defined(CONFIG_GRKERNSEC_KMEM)
23918+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23919+{
23920+ return 0;
23921+}
23922 #else
23923 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23924 {
23925@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23926
23927 while (count > 0) {
23928 unsigned long remaining;
23929+ char *temp;
23930
23931 sz = size_inside_page(p, count);
23932
23933@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23934 if (!ptr)
23935 return -EFAULT;
23936
23937- remaining = copy_to_user(buf, ptr, sz);
23938+#ifdef CONFIG_PAX_USERCOPY
23939+ temp = kmalloc(sz, GFP_KERNEL);
23940+ if (!temp) {
23941+ unxlate_dev_mem_ptr(p, ptr);
23942+ return -ENOMEM;
23943+ }
23944+ memcpy(temp, ptr, sz);
23945+#else
23946+ temp = ptr;
23947+#endif
23948+
23949+ remaining = copy_to_user(buf, temp, sz);
23950+
23951+#ifdef CONFIG_PAX_USERCOPY
23952+ kfree(temp);
23953+#endif
23954+
23955 unxlate_dev_mem_ptr(p, ptr);
23956 if (remaining)
23957 return -EFAULT;
23958@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23959 size_t count, loff_t *ppos)
23960 {
23961 unsigned long p = *ppos;
23962- ssize_t low_count, read, sz;
23963+ ssize_t low_count, read, sz, err = 0;
23964 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23965- int err = 0;
23966
23967 read = 0;
23968 if (p < (unsigned long) high_memory) {
23969@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23970 }
23971 #endif
23972 while (low_count > 0) {
23973+ char *temp;
23974+
23975 sz = size_inside_page(p, low_count);
23976
23977 /*
23978@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23979 */
23980 kbuf = xlate_dev_kmem_ptr((char *)p);
23981
23982- if (copy_to_user(buf, kbuf, sz))
23983+#ifdef CONFIG_PAX_USERCOPY
23984+ temp = kmalloc(sz, GFP_KERNEL);
23985+ if (!temp)
23986+ return -ENOMEM;
23987+ memcpy(temp, kbuf, sz);
23988+#else
23989+ temp = kbuf;
23990+#endif
23991+
23992+ err = copy_to_user(buf, temp, sz);
23993+
23994+#ifdef CONFIG_PAX_USERCOPY
23995+ kfree(temp);
23996+#endif
23997+
23998+ if (err)
23999 return -EFAULT;
24000 buf += sz;
24001 p += sz;
24002@@ -854,6 +901,9 @@ static const struct memdev {
24003 #ifdef CONFIG_CRASH_DUMP
24004 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24005 #endif
24006+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24007+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24008+#endif
24009 };
24010
24011 static int memory_open(struct inode *inode, struct file *filp)
24012diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24013--- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24014+++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24015@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24016
24017 spin_unlock_irq(&rtc_lock);
24018
24019- if (copy_to_user(buf, contents, tmp - contents))
24020+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24021 return -EFAULT;
24022
24023 *ppos = i;
24024diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24025--- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24026+++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24027@@ -261,8 +261,13 @@
24028 /*
24029 * Configuration information
24030 */
24031+#ifdef CONFIG_GRKERNSEC_RANDNET
24032+#define INPUT_POOL_WORDS 512
24033+#define OUTPUT_POOL_WORDS 128
24034+#else
24035 #define INPUT_POOL_WORDS 128
24036 #define OUTPUT_POOL_WORDS 32
24037+#endif
24038 #define SEC_XFER_SIZE 512
24039 #define EXTRACT_SIZE 10
24040
24041@@ -300,10 +305,17 @@ static struct poolinfo {
24042 int poolwords;
24043 int tap1, tap2, tap3, tap4, tap5;
24044 } poolinfo_table[] = {
24045+#ifdef CONFIG_GRKERNSEC_RANDNET
24046+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24047+ { 512, 411, 308, 208, 104, 1 },
24048+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24049+ { 128, 103, 76, 51, 25, 1 },
24050+#else
24051 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24052 { 128, 103, 76, 51, 25, 1 },
24053 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24054 { 32, 26, 20, 14, 7, 1 },
24055+#endif
24056 #if 0
24057 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24058 { 2048, 1638, 1231, 819, 411, 1 },
24059@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24060
24061 extract_buf(r, tmp);
24062 i = min_t(int, nbytes, EXTRACT_SIZE);
24063- if (copy_to_user(buf, tmp, i)) {
24064+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24065 ret = -EFAULT;
24066 break;
24067 }
24068@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24069 #include <linux/sysctl.h>
24070
24071 static int min_read_thresh = 8, min_write_thresh;
24072-static int max_read_thresh = INPUT_POOL_WORDS * 32;
24073+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24074 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24075 static char sysctl_bootid[16];
24076
24077diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24078--- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24079+++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24080@@ -55,6 +55,7 @@
24081 #include <asm/uaccess.h>
24082 #include <asm/io.h>
24083 #include <asm/system.h>
24084+#include <asm/local.h>
24085
24086 #include <linux/sonypi.h>
24087
24088@@ -491,7 +492,7 @@ static struct sonypi_device {
24089 spinlock_t fifo_lock;
24090 wait_queue_head_t fifo_proc_list;
24091 struct fasync_struct *fifo_async;
24092- int open_count;
24093+ local_t open_count;
24094 int model;
24095 struct input_dev *input_jog_dev;
24096 struct input_dev *input_key_dev;
24097@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24098 static int sonypi_misc_release(struct inode *inode, struct file *file)
24099 {
24100 mutex_lock(&sonypi_device.lock);
24101- sonypi_device.open_count--;
24102+ local_dec(&sonypi_device.open_count);
24103 mutex_unlock(&sonypi_device.lock);
24104 return 0;
24105 }
24106@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24107 {
24108 mutex_lock(&sonypi_device.lock);
24109 /* Flush input queue on first open */
24110- if (!sonypi_device.open_count)
24111+ if (!local_read(&sonypi_device.open_count))
24112 kfifo_reset(&sonypi_device.fifo);
24113- sonypi_device.open_count++;
24114+ local_inc(&sonypi_device.open_count);
24115 mutex_unlock(&sonypi_device.lock);
24116
24117 return 0;
24118diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24119--- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24120+++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24121@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24122 event = addr;
24123
24124 if ((event->event_type == 0 && event->event_size == 0) ||
24125- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24126+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24127 return NULL;
24128
24129 return addr;
24130@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24131 return NULL;
24132
24133 if ((event->event_type == 0 && event->event_size == 0) ||
24134- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24135+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24136 return NULL;
24137
24138 (*pos)++;
24139@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24140 int i;
24141
24142 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24143- seq_putc(m, data[i]);
24144+ if (!seq_putc(m, data[i]))
24145+ return -EFAULT;
24146
24147 return 0;
24148 }
24149@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24150 log->bios_event_log_end = log->bios_event_log + len;
24151
24152 virt = acpi_os_map_memory(start, len);
24153+ if (!virt) {
24154+ kfree(log->bios_event_log);
24155+ log->bios_event_log = NULL;
24156+ return -EFAULT;
24157+ }
24158
24159 memcpy(log->bios_event_log, virt, len);
24160
24161diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24162--- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24163+++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24164@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24165 chip->vendor.req_complete_val)
24166 goto out_recv;
24167
24168- if ((status == chip->vendor.req_canceled)) {
24169+ if (status == chip->vendor.req_canceled) {
24170 dev_err(chip->dev, "Operation Canceled\n");
24171 rc = -ECANCELED;
24172 goto out;
24173@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24174
24175 struct tpm_chip *chip = dev_get_drvdata(dev);
24176
24177+ pax_track_stack();
24178+
24179 tpm_cmd.header.in = tpm_readpubek_header;
24180 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24181 "attempting to read the PUBEK");
24182diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24183--- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24184+++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24185@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24186 0xCA, 0x34, 0x2B, 0x2E};
24187 struct scatterlist sg;
24188
24189+ pax_track_stack();
24190+
24191 memset(src, 0, sizeof(src));
24192 memset(ctx.key, 0, sizeof(ctx.key));
24193
24194diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24195--- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24196+++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24197@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24198 struct crypto_aes_ctx gen_aes;
24199 int cpu;
24200
24201+ pax_track_stack();
24202+
24203 if (key_len % 8) {
24204 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24205 return -EINVAL;
24206diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24207--- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24208+++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24209@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24210 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24211 static int edac_pci_poll_msec = 1000; /* one second workq period */
24212
24213-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24214-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24215+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24216+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24217
24218 static struct kobject *edac_pci_top_main_kobj;
24219 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24220@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24221 edac_printk(KERN_CRIT, EDAC_PCI,
24222 "Signaled System Error on %s\n",
24223 pci_name(dev));
24224- atomic_inc(&pci_nonparity_count);
24225+ atomic_inc_unchecked(&pci_nonparity_count);
24226 }
24227
24228 if (status & (PCI_STATUS_PARITY)) {
24229@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24230 "Master Data Parity Error on %s\n",
24231 pci_name(dev));
24232
24233- atomic_inc(&pci_parity_count);
24234+ atomic_inc_unchecked(&pci_parity_count);
24235 }
24236
24237 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24238@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24239 "Detected Parity Error on %s\n",
24240 pci_name(dev));
24241
24242- atomic_inc(&pci_parity_count);
24243+ atomic_inc_unchecked(&pci_parity_count);
24244 }
24245 }
24246
24247@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24248 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24249 "Signaled System Error on %s\n",
24250 pci_name(dev));
24251- atomic_inc(&pci_nonparity_count);
24252+ atomic_inc_unchecked(&pci_nonparity_count);
24253 }
24254
24255 if (status & (PCI_STATUS_PARITY)) {
24256@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24257 "Master Data Parity Error on "
24258 "%s\n", pci_name(dev));
24259
24260- atomic_inc(&pci_parity_count);
24261+ atomic_inc_unchecked(&pci_parity_count);
24262 }
24263
24264 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24265@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24266 "Detected Parity Error on %s\n",
24267 pci_name(dev));
24268
24269- atomic_inc(&pci_parity_count);
24270+ atomic_inc_unchecked(&pci_parity_count);
24271 }
24272 }
24273 }
24274@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24275 if (!check_pci_errors)
24276 return;
24277
24278- before_count = atomic_read(&pci_parity_count);
24279+ before_count = atomic_read_unchecked(&pci_parity_count);
24280
24281 /* scan all PCI devices looking for a Parity Error on devices and
24282 * bridges.
24283@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24284 /* Only if operator has selected panic on PCI Error */
24285 if (edac_pci_get_panic_on_pe()) {
24286 /* If the count is different 'after' from 'before' */
24287- if (before_count != atomic_read(&pci_parity_count))
24288+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24289 panic("EDAC: PCI Parity Error");
24290 }
24291 }
24292diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24293--- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24294+++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24295@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24296 char *type, *optype, *err, *msg;
24297 unsigned long error = m->status & 0x1ff0000l;
24298 u32 optypenum = (m->status >> 4) & 0x07;
24299- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24300+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24301 u32 dimm = (m->misc >> 16) & 0x3;
24302 u32 channel = (m->misc >> 18) & 0x3;
24303 u32 syndrome = m->misc >> 32;
24304diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24305--- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24306+++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24307@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24308 bool (*dc_mce)(u16, u8);
24309 bool (*ic_mce)(u16, u8);
24310 bool (*nb_mce)(u16, u8);
24311-};
24312+} __no_const;
24313
24314 void amd_report_gart_errors(bool);
24315 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24316diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24317--- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24318+++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24319@@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24320
24321 void fw_core_remove_card(struct fw_card *card)
24322 {
24323- struct fw_card_driver dummy_driver = dummy_driver_template;
24324+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24325
24326 card->driver->update_phy_reg(card, 4,
24327 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24328diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24329--- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24330+++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24331@@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24332 int ret;
24333
24334 if ((request->channels == 0 && request->bandwidth == 0) ||
24335- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24336- request->bandwidth < 0)
24337+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24338 return -EINVAL;
24339
24340 r = kmalloc(sizeof(*r), GFP_KERNEL);
24341diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24342--- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24343+++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24344@@ -99,6 +99,7 @@ struct fw_card_driver {
24345
24346 int (*stop_iso)(struct fw_iso_context *ctx);
24347 };
24348+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24349
24350 void fw_card_initialize(struct fw_card *card,
24351 const struct fw_card_driver *driver, struct device *device);
24352diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24353--- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24354+++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24355@@ -36,6 +36,7 @@
24356 #include <linux/string.h>
24357 #include <linux/timer.h>
24358 #include <linux/types.h>
24359+#include <linux/sched.h>
24360
24361 #include <asm/byteorder.h>
24362
24363@@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24364 struct transaction_callback_data d;
24365 struct fw_transaction t;
24366
24367+ pax_track_stack();
24368+
24369 init_timer_on_stack(&t.split_timeout_timer);
24370 init_completion(&d.done);
24371 d.payload = payload;
24372diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24373--- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24374+++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24375@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24376 }
24377 }
24378 else {
24379- /*
24380- * no iounmap() for that ioremap(); it would be a no-op, but
24381- * it's so early in setup that sucker gets confused into doing
24382- * what it shouldn't if we actually call it.
24383- */
24384 p = dmi_ioremap(0xF0000, 0x10000);
24385 if (p == NULL)
24386 goto error;
24387diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24388--- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24389+++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24390@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24391 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24392 maskl, pendl, maskh, pendh);
24393
24394- atomic_inc(&irq_err_count);
24395+ atomic_inc_unchecked(&irq_err_count);
24396
24397 return -EINVAL;
24398 }
24399diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24400--- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24401+++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24402@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24403 struct drm_crtc *tmp;
24404 int crtc_mask = 1;
24405
24406- WARN(!crtc, "checking null crtc?\n");
24407+ BUG_ON(!crtc);
24408
24409 dev = crtc->dev;
24410
24411@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24412 struct drm_encoder *encoder;
24413 bool ret = true;
24414
24415+ pax_track_stack();
24416+
24417 crtc->enabled = drm_helper_crtc_in_use(crtc);
24418 if (!crtc->enabled)
24419 return true;
24420diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24421--- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24422+++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24423@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24424
24425 dev = file_priv->minor->dev;
24426 atomic_inc(&dev->ioctl_count);
24427- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24428+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24429 ++file_priv->ioctl_count;
24430
24431 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24432diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24433--- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24434+++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24435@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24436 }
24437
24438 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24439- atomic_set(&dev->counts[i], 0);
24440+ atomic_set_unchecked(&dev->counts[i], 0);
24441
24442 dev->sigdata.lock = NULL;
24443
24444@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24445
24446 retcode = drm_open_helper(inode, filp, dev);
24447 if (!retcode) {
24448- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24449- if (!dev->open_count++)
24450+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24451+ if (local_inc_return(&dev->open_count) == 1)
24452 retcode = drm_setup(dev);
24453 }
24454 if (!retcode) {
24455@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24456
24457 mutex_lock(&drm_global_mutex);
24458
24459- DRM_DEBUG("open_count = %d\n", dev->open_count);
24460+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24461
24462 if (dev->driver->preclose)
24463 dev->driver->preclose(dev, file_priv);
24464@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24465 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24466 task_pid_nr(current),
24467 (long)old_encode_dev(file_priv->minor->device),
24468- dev->open_count);
24469+ local_read(&dev->open_count));
24470
24471 /* if the master has gone away we can't do anything with the lock */
24472 if (file_priv->minor->master)
24473@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24474 * End inline drm_release
24475 */
24476
24477- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24478- if (!--dev->open_count) {
24479+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24480+ if (local_dec_and_test(&dev->open_count)) {
24481 if (atomic_read(&dev->ioctl_count)) {
24482 DRM_ERROR("Device busy: %d\n",
24483 atomic_read(&dev->ioctl_count));
24484diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24485--- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24486+++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24487@@ -36,7 +36,7 @@
24488 struct drm_global_item {
24489 struct mutex mutex;
24490 void *object;
24491- int refcount;
24492+ atomic_t refcount;
24493 };
24494
24495 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24496@@ -49,7 +49,7 @@ void drm_global_init(void)
24497 struct drm_global_item *item = &glob[i];
24498 mutex_init(&item->mutex);
24499 item->object = NULL;
24500- item->refcount = 0;
24501+ atomic_set(&item->refcount, 0);
24502 }
24503 }
24504
24505@@ -59,7 +59,7 @@ void drm_global_release(void)
24506 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24507 struct drm_global_item *item = &glob[i];
24508 BUG_ON(item->object != NULL);
24509- BUG_ON(item->refcount != 0);
24510+ BUG_ON(atomic_read(&item->refcount) != 0);
24511 }
24512 }
24513
24514@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24515 void *object;
24516
24517 mutex_lock(&item->mutex);
24518- if (item->refcount == 0) {
24519+ if (atomic_read(&item->refcount) == 0) {
24520 item->object = kzalloc(ref->size, GFP_KERNEL);
24521 if (unlikely(item->object == NULL)) {
24522 ret = -ENOMEM;
24523@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24524 goto out_err;
24525
24526 }
24527- ++item->refcount;
24528+ atomic_inc(&item->refcount);
24529 ref->object = item->object;
24530 object = item->object;
24531 mutex_unlock(&item->mutex);
24532@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24533 struct drm_global_item *item = &glob[ref->global_type];
24534
24535 mutex_lock(&item->mutex);
24536- BUG_ON(item->refcount == 0);
24537+ BUG_ON(atomic_read(&item->refcount) == 0);
24538 BUG_ON(ref->object != item->object);
24539- if (--item->refcount == 0) {
24540+ if (atomic_dec_and_test(&item->refcount)) {
24541 ref->release(ref);
24542 item->object = NULL;
24543 }
24544diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24545--- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24546+++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24547@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24548 struct drm_local_map *map;
24549 struct drm_map_list *r_list;
24550
24551- /* Hardcoded from _DRM_FRAME_BUFFER,
24552- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24553- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24554- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24555+ static const char * const types[] = {
24556+ [_DRM_FRAME_BUFFER] = "FB",
24557+ [_DRM_REGISTERS] = "REG",
24558+ [_DRM_SHM] = "SHM",
24559+ [_DRM_AGP] = "AGP",
24560+ [_DRM_SCATTER_GATHER] = "SG",
24561+ [_DRM_CONSISTENT] = "PCI",
24562+ [_DRM_GEM] = "GEM" };
24563 const char *type;
24564 int i;
24565
24566@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24567 map = r_list->map;
24568 if (!map)
24569 continue;
24570- if (map->type < 0 || map->type > 5)
24571+ if (map->type >= ARRAY_SIZE(types))
24572 type = "??";
24573 else
24574 type = types[map->type];
24575@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24576 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24577 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24578 vma->vm_flags & VM_IO ? 'i' : '-',
24579+#ifdef CONFIG_GRKERNSEC_HIDESYM
24580+ 0);
24581+#else
24582 vma->vm_pgoff);
24583+#endif
24584
24585 #if defined(__i386__)
24586 pgprot = pgprot_val(vma->vm_page_prot);
24587diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24588--- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24589+++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24590@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24591 stats->data[i].value =
24592 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24593 else
24594- stats->data[i].value = atomic_read(&dev->counts[i]);
24595+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24596 stats->data[i].type = dev->types[i];
24597 }
24598
24599diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24600--- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24601+++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24602@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24603 if (drm_lock_take(&master->lock, lock->context)) {
24604 master->lock.file_priv = file_priv;
24605 master->lock.lock_time = jiffies;
24606- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24607+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24608 break; /* Got lock */
24609 }
24610
24611@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24612 return -EINVAL;
24613 }
24614
24615- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24616+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24617
24618 if (drm_lock_free(&master->lock, lock->context)) {
24619 /* FIXME: Should really bail out here. */
24620diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24621--- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24622+++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24623@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24624 dma->buflist[vertex->idx],
24625 vertex->discard, vertex->used);
24626
24627- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24628- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24629+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24630+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24631 sarea_priv->last_enqueue = dev_priv->counter - 1;
24632 sarea_priv->last_dispatch = (int)hw_status[5];
24633
24634@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24635 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24636 mc->last_render);
24637
24638- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24639- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24640+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24641+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24642 sarea_priv->last_enqueue = dev_priv->counter - 1;
24643 sarea_priv->last_dispatch = (int)hw_status[5];
24644
24645diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24646--- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24647+++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24648@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24649 int page_flipping;
24650
24651 wait_queue_head_t irq_queue;
24652- atomic_t irq_received;
24653- atomic_t irq_emitted;
24654+ atomic_unchecked_t irq_received;
24655+ atomic_unchecked_t irq_emitted;
24656
24657 int front_offset;
24658 } drm_i810_private_t;
24659diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24660--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24661+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24662@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24663 I915_READ(GTIMR));
24664 }
24665 seq_printf(m, "Interrupts received: %d\n",
24666- atomic_read(&dev_priv->irq_received));
24667+ atomic_read_unchecked(&dev_priv->irq_received));
24668 for (i = 0; i < I915_NUM_RINGS; i++) {
24669 if (IS_GEN6(dev)) {
24670 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24671diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24672--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24673+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24674@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24675 bool can_switch;
24676
24677 spin_lock(&dev->count_lock);
24678- can_switch = (dev->open_count == 0);
24679+ can_switch = (local_read(&dev->open_count) == 0);
24680 spin_unlock(&dev->count_lock);
24681 return can_switch;
24682 }
24683diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24684--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24685+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24686@@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24687 /* display clock increase/decrease */
24688 /* pll clock increase/decrease */
24689 /* clock gating init */
24690-};
24691+} __no_const;
24692
24693 struct intel_device_info {
24694 u8 gen;
24695@@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24696 int current_page;
24697 int page_flipping;
24698
24699- atomic_t irq_received;
24700+ atomic_unchecked_t irq_received;
24701
24702 /* protects the irq masks */
24703 spinlock_t irq_lock;
24704@@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24705 * will be page flipped away on the next vblank. When it
24706 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24707 */
24708- atomic_t pending_flip;
24709+ atomic_unchecked_t pending_flip;
24710 };
24711
24712 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24713@@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24714 extern void intel_teardown_gmbus(struct drm_device *dev);
24715 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24716 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24717-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24718+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24719 {
24720 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24721 }
24722diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24723--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24724+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24725@@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24726 i915_gem_release_mmap(obj);
24727
24728 if (obj->base.pending_write_domain)
24729- cd->flips |= atomic_read(&obj->pending_flip);
24730+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24731
24732 /* The actual obj->write_domain will be updated with
24733 * pending_write_domain after we emit the accumulated flush for all
24734diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24735--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24736+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24737@@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24738 int ret = IRQ_NONE, pipe;
24739 bool blc_event = false;
24740
24741- atomic_inc(&dev_priv->irq_received);
24742+ atomic_inc_unchecked(&dev_priv->irq_received);
24743
24744 if (HAS_PCH_SPLIT(dev))
24745 return ironlake_irq_handler(dev);
24746@@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24747 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24748 int pipe;
24749
24750- atomic_set(&dev_priv->irq_received, 0);
24751+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24752
24753 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24754 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24755diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24756--- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24757+++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24758@@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24759
24760 wait_event(dev_priv->pending_flip_queue,
24761 atomic_read(&dev_priv->mm.wedged) ||
24762- atomic_read(&obj->pending_flip) == 0);
24763+ atomic_read_unchecked(&obj->pending_flip) == 0);
24764
24765 /* Big Hammer, we also need to ensure that any pending
24766 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24767@@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24768 obj = to_intel_framebuffer(crtc->fb)->obj;
24769 dev_priv = crtc->dev->dev_private;
24770 wait_event(dev_priv->pending_flip_queue,
24771- atomic_read(&obj->pending_flip) == 0);
24772+ atomic_read_unchecked(&obj->pending_flip) == 0);
24773 }
24774
24775 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24776@@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24777
24778 atomic_clear_mask(1 << intel_crtc->plane,
24779 &obj->pending_flip.counter);
24780- if (atomic_read(&obj->pending_flip) == 0)
24781+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
24782 wake_up(&dev_priv->pending_flip_queue);
24783
24784 schedule_work(&work->work);
24785@@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24786 /* Block clients from rendering to the new back buffer until
24787 * the flip occurs and the object is no longer visible.
24788 */
24789- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24790+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24791
24792 switch (INTEL_INFO(dev)->gen) {
24793 case 2:
24794diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24795--- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24796+++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24797@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24798 u32 clear_cmd;
24799 u32 maccess;
24800
24801- atomic_t vbl_received; /**< Number of vblanks received. */
24802+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24803 wait_queue_head_t fence_queue;
24804- atomic_t last_fence_retired;
24805+ atomic_unchecked_t last_fence_retired;
24806 u32 next_fence_to_post;
24807
24808 unsigned int fb_cpp;
24809diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24810--- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24811+++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24812@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24813 if (crtc != 0)
24814 return 0;
24815
24816- return atomic_read(&dev_priv->vbl_received);
24817+ return atomic_read_unchecked(&dev_priv->vbl_received);
24818 }
24819
24820
24821@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24822 /* VBLANK interrupt */
24823 if (status & MGA_VLINEPEN) {
24824 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24825- atomic_inc(&dev_priv->vbl_received);
24826+ atomic_inc_unchecked(&dev_priv->vbl_received);
24827 drm_handle_vblank(dev, 0);
24828 handled = 1;
24829 }
24830@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24831 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24832 MGA_WRITE(MGA_PRIMEND, prim_end);
24833
24834- atomic_inc(&dev_priv->last_fence_retired);
24835+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
24836 DRM_WAKEUP(&dev_priv->fence_queue);
24837 handled = 1;
24838 }
24839@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24840 * using fences.
24841 */
24842 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24843- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24844+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24845 - *sequence) <= (1 << 23)));
24846
24847 *sequence = cur_fence;
24848diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24849--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24850+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24851@@ -228,7 +228,7 @@ struct nouveau_channel {
24852 struct list_head pending;
24853 uint32_t sequence;
24854 uint32_t sequence_ack;
24855- atomic_t last_sequence_irq;
24856+ atomic_unchecked_t last_sequence_irq;
24857 } fence;
24858
24859 /* DMA push buffer */
24860@@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24861 struct nouveau_mc_engine {
24862 int (*init)(struct drm_device *dev);
24863 void (*takedown)(struct drm_device *dev);
24864-};
24865+} __no_const;
24866
24867 struct nouveau_timer_engine {
24868 int (*init)(struct drm_device *dev);
24869 void (*takedown)(struct drm_device *dev);
24870 uint64_t (*read)(struct drm_device *dev);
24871-};
24872+} __no_const;
24873
24874 struct nouveau_fb_engine {
24875 int num_tiles;
24876@@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24877 void (*put)(struct drm_device *, struct nouveau_mem **);
24878
24879 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24880-};
24881+} __no_const;
24882
24883 struct nouveau_engine {
24884 struct nouveau_instmem_engine instmem;
24885@@ -662,7 +662,7 @@ struct drm_nouveau_private {
24886 struct drm_global_reference mem_global_ref;
24887 struct ttm_bo_global_ref bo_global_ref;
24888 struct ttm_bo_device bdev;
24889- atomic_t validate_sequence;
24890+ atomic_unchecked_t validate_sequence;
24891 } ttm;
24892
24893 struct {
24894diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24895--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24896+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24897@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24898 if (USE_REFCNT(dev))
24899 sequence = nvchan_rd32(chan, 0x48);
24900 else
24901- sequence = atomic_read(&chan->fence.last_sequence_irq);
24902+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24903
24904 if (chan->fence.sequence_ack == sequence)
24905 goto out;
24906@@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24907 out_initialised:
24908 INIT_LIST_HEAD(&chan->fence.pending);
24909 spin_lock_init(&chan->fence.lock);
24910- atomic_set(&chan->fence.last_sequence_irq, 0);
24911+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24912 return 0;
24913 }
24914
24915diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24916--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24917+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24918@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24919 int trycnt = 0;
24920 int ret, i;
24921
24922- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24923+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24924 retry:
24925 if (++trycnt > 100000) {
24926 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24927diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24928--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24929+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24930@@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24931 bool can_switch;
24932
24933 spin_lock(&dev->count_lock);
24934- can_switch = (dev->open_count == 0);
24935+ can_switch = (local_read(&dev->open_count) == 0);
24936 spin_unlock(&dev->count_lock);
24937 return can_switch;
24938 }
24939diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24940--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24941+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24942@@ -552,7 +552,7 @@ static int
24943 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24944 u32 class, u32 mthd, u32 data)
24945 {
24946- atomic_set(&chan->fence.last_sequence_irq, data);
24947+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24948 return 0;
24949 }
24950
24951diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24952--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24953+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24954@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24955
24956 /* GH: Simple idle check.
24957 */
24958- atomic_set(&dev_priv->idle_count, 0);
24959+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24960
24961 /* We don't support anything other than bus-mastering ring mode,
24962 * but the ring can be in either AGP or PCI space for the ring
24963diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
24964--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
24965+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
24966@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24967 int is_pci;
24968 unsigned long cce_buffers_offset;
24969
24970- atomic_t idle_count;
24971+ atomic_unchecked_t idle_count;
24972
24973 int page_flipping;
24974 int current_page;
24975 u32 crtc_offset;
24976 u32 crtc_offset_cntl;
24977
24978- atomic_t vbl_received;
24979+ atomic_unchecked_t vbl_received;
24980
24981 u32 color_fmt;
24982 unsigned int front_offset;
24983diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
24984--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
24985+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
24986@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24987 if (crtc != 0)
24988 return 0;
24989
24990- return atomic_read(&dev_priv->vbl_received);
24991+ return atomic_read_unchecked(&dev_priv->vbl_received);
24992 }
24993
24994 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24995@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24996 /* VBLANK interrupt */
24997 if (status & R128_CRTC_VBLANK_INT) {
24998 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24999- atomic_inc(&dev_priv->vbl_received);
25000+ atomic_inc_unchecked(&dev_priv->vbl_received);
25001 drm_handle_vblank(dev, 0);
25002 return IRQ_HANDLED;
25003 }
25004diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25005--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25006+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25007@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25008
25009 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25010 {
25011- if (atomic_read(&dev_priv->idle_count) == 0)
25012+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25013 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25014 else
25015- atomic_set(&dev_priv->idle_count, 0);
25016+ atomic_set_unchecked(&dev_priv->idle_count, 0);
25017 }
25018
25019 #endif
25020diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25021--- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25022+++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25023@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25024 char name[512];
25025 int i;
25026
25027+ pax_track_stack();
25028+
25029 ctx->card = card;
25030 ctx->bios = bios;
25031
25032diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25033--- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25034+++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25035@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25036 regex_t mask_rex;
25037 regmatch_t match[4];
25038 char buf[1024];
25039- size_t end;
25040+ long end;
25041 int len;
25042 int done = 0;
25043 int r;
25044 unsigned o;
25045 struct offset *offset;
25046 char last_reg_s[10];
25047- int last_reg;
25048+ unsigned long last_reg;
25049
25050 if (regcomp
25051 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25052diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25053--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25054+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25055@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25056 struct radeon_gpio_rec gpio;
25057 struct radeon_hpd hpd;
25058
25059+ pax_track_stack();
25060+
25061 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25062 return false;
25063
25064diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25065--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25066+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25067@@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25068 bool can_switch;
25069
25070 spin_lock(&dev->count_lock);
25071- can_switch = (dev->open_count == 0);
25072+ can_switch = (local_read(&dev->open_count) == 0);
25073 spin_unlock(&dev->count_lock);
25074 return can_switch;
25075 }
25076diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25077--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25078+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25079@@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25080 uint32_t post_div;
25081 u32 pll_out_min, pll_out_max;
25082
25083+ pax_track_stack();
25084+
25085 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25086 freq = freq * 1000;
25087
25088diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25089--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25090+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25091@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25092
25093 /* SW interrupt */
25094 wait_queue_head_t swi_queue;
25095- atomic_t swi_emitted;
25096+ atomic_unchecked_t swi_emitted;
25097 int vblank_crtc;
25098 uint32_t irq_enable_reg;
25099 uint32_t r500_disp_irq_reg;
25100diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25101--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25102+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25103@@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25104 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25105 return 0;
25106 }
25107- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25108+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25109 if (!rdev->cp.ready) {
25110 /* FIXME: cp is not running assume everythings is done right
25111 * away
25112@@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25113 return r;
25114 }
25115 WREG32(rdev->fence_drv.scratch_reg, 0);
25116- atomic_set(&rdev->fence_drv.seq, 0);
25117+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25118 INIT_LIST_HEAD(&rdev->fence_drv.created);
25119 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25120 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25121diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25122--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25123+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25124@@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25125 */
25126 struct radeon_fence_driver {
25127 uint32_t scratch_reg;
25128- atomic_t seq;
25129+ atomic_unchecked_t seq;
25130 uint32_t last_seq;
25131 unsigned long last_jiffies;
25132 unsigned long last_timeout;
25133@@ -958,7 +958,7 @@ struct radeon_asic {
25134 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25135 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25136 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25137-};
25138+} __no_const;
25139
25140 /*
25141 * Asic structures
25142diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25143--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25144+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25145@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25146 request = compat_alloc_user_space(sizeof(*request));
25147 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25148 || __put_user(req32.param, &request->param)
25149- || __put_user((void __user *)(unsigned long)req32.value,
25150+ || __put_user((unsigned long)req32.value,
25151 &request->value))
25152 return -EFAULT;
25153
25154diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25155--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25156+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25157@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25158 unsigned int ret;
25159 RING_LOCALS;
25160
25161- atomic_inc(&dev_priv->swi_emitted);
25162- ret = atomic_read(&dev_priv->swi_emitted);
25163+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25164+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25165
25166 BEGIN_RING(4);
25167 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25168@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25169 drm_radeon_private_t *dev_priv =
25170 (drm_radeon_private_t *) dev->dev_private;
25171
25172- atomic_set(&dev_priv->swi_emitted, 0);
25173+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25174 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25175
25176 dev->max_vblank_count = 0x001fffff;
25177diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25178--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25179+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25180@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25181 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25182 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25183
25184- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25185+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25186 sarea_priv->nbox * sizeof(depth_boxes[0])))
25187 return -EFAULT;
25188
25189@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25190 {
25191 drm_radeon_private_t *dev_priv = dev->dev_private;
25192 drm_radeon_getparam_t *param = data;
25193- int value;
25194+ int value = 0;
25195
25196 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25197
25198diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25199--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25200+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25201@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25202 }
25203 if (unlikely(ttm_vm_ops == NULL)) {
25204 ttm_vm_ops = vma->vm_ops;
25205- radeon_ttm_vm_ops = *ttm_vm_ops;
25206- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25207+ pax_open_kernel();
25208+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25209+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25210+ pax_close_kernel();
25211 }
25212 vma->vm_ops = &radeon_ttm_vm_ops;
25213 return 0;
25214diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25215--- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25216+++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25217@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25218 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25219 rdev->pm.sideport_bandwidth.full)
25220 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25221- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25222+ read_delay_latency.full = dfixed_const(800 * 1000);
25223 read_delay_latency.full = dfixed_div(read_delay_latency,
25224 rdev->pm.igp_sideport_mclk);
25225+ a.full = dfixed_const(370);
25226+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25227 } else {
25228 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25229 rdev->pm.k8_bandwidth.full)
25230diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25231--- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25232+++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25233@@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25234 */
25235 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25236 {
25237- static atomic_t start_pool = ATOMIC_INIT(0);
25238+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25239 unsigned i;
25240- unsigned pool_offset = atomic_add_return(1, &start_pool);
25241+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25242 struct ttm_page_pool *pool;
25243
25244 pool_offset = pool_offset % NUM_POOLS;
25245diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25246--- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25247+++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25248@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25249 typedef uint32_t maskarray_t[5];
25250
25251 typedef struct drm_via_irq {
25252- atomic_t irq_received;
25253+ atomic_unchecked_t irq_received;
25254 uint32_t pending_mask;
25255 uint32_t enable_mask;
25256 wait_queue_head_t irq_queue;
25257@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25258 struct timeval last_vblank;
25259 int last_vblank_valid;
25260 unsigned usec_per_vblank;
25261- atomic_t vbl_received;
25262+ atomic_unchecked_t vbl_received;
25263 drm_via_state_t hc_state;
25264 char pci_buf[VIA_PCI_BUF_SIZE];
25265 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25266diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25267--- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25268+++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25269@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25270 if (crtc != 0)
25271 return 0;
25272
25273- return atomic_read(&dev_priv->vbl_received);
25274+ return atomic_read_unchecked(&dev_priv->vbl_received);
25275 }
25276
25277 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25278@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25279
25280 status = VIA_READ(VIA_REG_INTERRUPT);
25281 if (status & VIA_IRQ_VBLANK_PENDING) {
25282- atomic_inc(&dev_priv->vbl_received);
25283- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25284+ atomic_inc_unchecked(&dev_priv->vbl_received);
25285+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25286 do_gettimeofday(&cur_vblank);
25287 if (dev_priv->last_vblank_valid) {
25288 dev_priv->usec_per_vblank =
25289@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25290 dev_priv->last_vblank = cur_vblank;
25291 dev_priv->last_vblank_valid = 1;
25292 }
25293- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25294+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25295 DRM_DEBUG("US per vblank is: %u\n",
25296 dev_priv->usec_per_vblank);
25297 }
25298@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25299
25300 for (i = 0; i < dev_priv->num_irqs; ++i) {
25301 if (status & cur_irq->pending_mask) {
25302- atomic_inc(&cur_irq->irq_received);
25303+ atomic_inc_unchecked(&cur_irq->irq_received);
25304 DRM_WAKEUP(&cur_irq->irq_queue);
25305 handled = 1;
25306 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25307@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25308 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25309 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25310 masks[irq][4]));
25311- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25312+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25313 } else {
25314 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25315 (((cur_irq_sequence =
25316- atomic_read(&cur_irq->irq_received)) -
25317+ atomic_read_unchecked(&cur_irq->irq_received)) -
25318 *sequence) <= (1 << 23)));
25319 }
25320 *sequence = cur_irq_sequence;
25321@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25322 }
25323
25324 for (i = 0; i < dev_priv->num_irqs; ++i) {
25325- atomic_set(&cur_irq->irq_received, 0);
25326+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25327 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25328 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25329 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25330@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25331 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25332 case VIA_IRQ_RELATIVE:
25333 irqwait->request.sequence +=
25334- atomic_read(&cur_irq->irq_received);
25335+ atomic_read_unchecked(&cur_irq->irq_received);
25336 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25337 case VIA_IRQ_ABSOLUTE:
25338 break;
25339diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25340--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25341+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25342@@ -240,7 +240,7 @@ struct vmw_private {
25343 * Fencing and IRQs.
25344 */
25345
25346- atomic_t fence_seq;
25347+ atomic_unchecked_t fence_seq;
25348 wait_queue_head_t fence_queue;
25349 wait_queue_head_t fifo_queue;
25350 atomic_t fence_queue_waiters;
25351diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25352--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25353+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25354@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25355 while (!vmw_lag_lt(queue, us)) {
25356 spin_lock(&queue->lock);
25357 if (list_empty(&queue->head))
25358- sequence = atomic_read(&dev_priv->fence_seq);
25359+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25360 else {
25361 fence = list_first_entry(&queue->head,
25362 struct vmw_fence, head);
25363diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25364--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25365+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25366@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25367 (unsigned int) min,
25368 (unsigned int) fifo->capabilities);
25369
25370- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25371+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25372 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25373 vmw_fence_queue_init(&fifo->fence_queue);
25374 return vmw_fifo_send_fence(dev_priv, &dummy);
25375@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25376
25377 fm = vmw_fifo_reserve(dev_priv, bytes);
25378 if (unlikely(fm == NULL)) {
25379- *sequence = atomic_read(&dev_priv->fence_seq);
25380+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25381 ret = -ENOMEM;
25382 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25383 false, 3*HZ);
25384@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25385 }
25386
25387 do {
25388- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25389+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25390 } while (*sequence == 0);
25391
25392 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25393diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25394--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25395+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25396@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25397 * emitted. Then the fence is stale and signaled.
25398 */
25399
25400- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25401+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25402 > VMW_FENCE_WRAP);
25403
25404 return ret;
25405@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25406
25407 if (fifo_idle)
25408 down_read(&fifo_state->rwsem);
25409- signal_seq = atomic_read(&dev_priv->fence_seq);
25410+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25411 ret = 0;
25412
25413 for (;;) {
25414diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25415--- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25416+++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25417@@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25418
25419 int hid_add_device(struct hid_device *hdev)
25420 {
25421- static atomic_t id = ATOMIC_INIT(0);
25422+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25423 int ret;
25424
25425 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25426@@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25427 /* XXX hack, any other cleaner solution after the driver core
25428 * is converted to allow more than 20 bytes as the device name? */
25429 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25430- hdev->vendor, hdev->product, atomic_inc_return(&id));
25431+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25432
25433 hid_debug_register(hdev, dev_name(&hdev->dev));
25434 ret = device_add(&hdev->dev);
25435diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25436--- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25437+++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25438@@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25439 break;
25440
25441 case HIDIOCAPPLICATION:
25442- if (arg < 0 || arg >= hid->maxapplication)
25443+ if (arg >= hid->maxapplication)
25444 break;
25445
25446 for (i = 0; i < hid->maxcollection; i++)
25447diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25448--- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25449+++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25450@@ -113,7 +113,7 @@ struct sht15_data {
25451 int supply_uV;
25452 int supply_uV_valid;
25453 struct work_struct update_supply_work;
25454- atomic_t interrupt_handled;
25455+ atomic_unchecked_t interrupt_handled;
25456 };
25457
25458 /**
25459@@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25460 return ret;
25461
25462 gpio_direction_input(data->pdata->gpio_data);
25463- atomic_set(&data->interrupt_handled, 0);
25464+ atomic_set_unchecked(&data->interrupt_handled, 0);
25465
25466 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25467 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25468 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25469 /* Only relevant if the interrupt hasn't occurred. */
25470- if (!atomic_read(&data->interrupt_handled))
25471+ if (!atomic_read_unchecked(&data->interrupt_handled))
25472 schedule_work(&data->read_work);
25473 }
25474 ret = wait_event_timeout(data->wait_queue,
25475@@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25476 struct sht15_data *data = d;
25477 /* First disable the interrupt */
25478 disable_irq_nosync(irq);
25479- atomic_inc(&data->interrupt_handled);
25480+ atomic_inc_unchecked(&data->interrupt_handled);
25481 /* Then schedule a reading work struct */
25482 if (data->flag != SHT15_READING_NOTHING)
25483 schedule_work(&data->read_work);
25484@@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25485 here as could have gone low in meantime so verify
25486 it hasn't!
25487 */
25488- atomic_set(&data->interrupt_handled, 0);
25489+ atomic_set_unchecked(&data->interrupt_handled, 0);
25490 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25491 /* If still not occurred or another handler has been scheduled */
25492 if (gpio_get_value(data->pdata->gpio_data)
25493- || atomic_read(&data->interrupt_handled))
25494+ || atomic_read_unchecked(&data->interrupt_handled))
25495 return;
25496 }
25497 /* Read the data back from the device */
25498diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25499--- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25500+++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25501@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25502 struct i2c_board_info *info);
25503 static int w83791d_remove(struct i2c_client *client);
25504
25505-static int w83791d_read(struct i2c_client *client, u8 register);
25506-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25507+static int w83791d_read(struct i2c_client *client, u8 reg);
25508+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25509 static struct w83791d_data *w83791d_update_device(struct device *dev);
25510
25511 #ifdef DEBUG
25512diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25513--- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25514+++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25515@@ -43,7 +43,7 @@
25516 extern struct i2c_adapter amd756_smbus;
25517
25518 static struct i2c_adapter *s4882_adapter;
25519-static struct i2c_algorithm *s4882_algo;
25520+static i2c_algorithm_no_const *s4882_algo;
25521
25522 /* Wrapper access functions for multiplexed SMBus */
25523 static DEFINE_MUTEX(amd756_lock);
25524diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25525--- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25526+++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25527@@ -41,7 +41,7 @@
25528 extern struct i2c_adapter *nforce2_smbus;
25529
25530 static struct i2c_adapter *s4985_adapter;
25531-static struct i2c_algorithm *s4985_algo;
25532+static i2c_algorithm_no_const *s4985_algo;
25533
25534 /* Wrapper access functions for multiplexed SMBus */
25535 static DEFINE_MUTEX(nforce2_lock);
25536diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25537--- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25538+++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25539@@ -28,7 +28,7 @@
25540 /* multiplexer per channel data */
25541 struct i2c_mux_priv {
25542 struct i2c_adapter adap;
25543- struct i2c_algorithm algo;
25544+ i2c_algorithm_no_const algo;
25545
25546 struct i2c_adapter *parent;
25547 void *mux_dev; /* the mux chip/device */
25548diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25549--- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25550+++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25551@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25552 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25553 if ((unsigned long)buf & alignment
25554 || blk_rq_bytes(rq) & q->dma_pad_mask
25555- || object_is_on_stack(buf))
25556+ || object_starts_on_stack(buf))
25557 drive->dma = 0;
25558 }
25559 }
25560diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25561--- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25562+++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25563@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25564 u8 pc_buf[256], header_len, desc_cnt;
25565 int i, rc = 1, blocks, length;
25566
25567+ pax_track_stack();
25568+
25569 ide_debug_log(IDE_DBG_FUNC, "enter");
25570
25571 drive->bios_cyl = 0;
25572diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25573--- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25574+++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25575@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25576 int ret, i, n_ports = dev2 ? 4 : 2;
25577 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25578
25579+ pax_track_stack();
25580+
25581 for (i = 0; i < n_ports / 2; i++) {
25582 ret = ide_setup_pci_controller(pdev[i], d, !i);
25583 if (ret < 0)
25584diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25585--- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25586+++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25587@@ -113,7 +113,7 @@ static char const counter_group_names[CM
25588
25589 struct cm_counter_group {
25590 struct kobject obj;
25591- atomic_long_t counter[CM_ATTR_COUNT];
25592+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25593 };
25594
25595 struct cm_counter_attribute {
25596@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25597 struct ib_mad_send_buf *msg = NULL;
25598 int ret;
25599
25600- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25601+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25602 counter[CM_REQ_COUNTER]);
25603
25604 /* Quick state check to discard duplicate REQs. */
25605@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25606 if (!cm_id_priv)
25607 return;
25608
25609- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25610+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25611 counter[CM_REP_COUNTER]);
25612 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25613 if (ret)
25614@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25615 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25616 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25617 spin_unlock_irq(&cm_id_priv->lock);
25618- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25619+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25620 counter[CM_RTU_COUNTER]);
25621 goto out;
25622 }
25623@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25624 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25625 dreq_msg->local_comm_id);
25626 if (!cm_id_priv) {
25627- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25628+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25629 counter[CM_DREQ_COUNTER]);
25630 cm_issue_drep(work->port, work->mad_recv_wc);
25631 return -EINVAL;
25632@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25633 case IB_CM_MRA_REP_RCVD:
25634 break;
25635 case IB_CM_TIMEWAIT:
25636- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25637+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25638 counter[CM_DREQ_COUNTER]);
25639 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25640 goto unlock;
25641@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25642 cm_free_msg(msg);
25643 goto deref;
25644 case IB_CM_DREQ_RCVD:
25645- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25646+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25647 counter[CM_DREQ_COUNTER]);
25648 goto unlock;
25649 default:
25650@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25651 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25652 cm_id_priv->msg, timeout)) {
25653 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25654- atomic_long_inc(&work->port->
25655+ atomic_long_inc_unchecked(&work->port->
25656 counter_group[CM_RECV_DUPLICATES].
25657 counter[CM_MRA_COUNTER]);
25658 goto out;
25659@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25660 break;
25661 case IB_CM_MRA_REQ_RCVD:
25662 case IB_CM_MRA_REP_RCVD:
25663- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25664+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25665 counter[CM_MRA_COUNTER]);
25666 /* fall through */
25667 default:
25668@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25669 case IB_CM_LAP_IDLE:
25670 break;
25671 case IB_CM_MRA_LAP_SENT:
25672- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25673+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25674 counter[CM_LAP_COUNTER]);
25675 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25676 goto unlock;
25677@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25678 cm_free_msg(msg);
25679 goto deref;
25680 case IB_CM_LAP_RCVD:
25681- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25682+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25683 counter[CM_LAP_COUNTER]);
25684 goto unlock;
25685 default:
25686@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25687 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25688 if (cur_cm_id_priv) {
25689 spin_unlock_irq(&cm.lock);
25690- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25691+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25692 counter[CM_SIDR_REQ_COUNTER]);
25693 goto out; /* Duplicate message. */
25694 }
25695@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25696 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25697 msg->retries = 1;
25698
25699- atomic_long_add(1 + msg->retries,
25700+ atomic_long_add_unchecked(1 + msg->retries,
25701 &port->counter_group[CM_XMIT].counter[attr_index]);
25702 if (msg->retries)
25703- atomic_long_add(msg->retries,
25704+ atomic_long_add_unchecked(msg->retries,
25705 &port->counter_group[CM_XMIT_RETRIES].
25706 counter[attr_index]);
25707
25708@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25709 }
25710
25711 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25712- atomic_long_inc(&port->counter_group[CM_RECV].
25713+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25714 counter[attr_id - CM_ATTR_ID_OFFSET]);
25715
25716 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25717@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25718 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25719
25720 return sprintf(buf, "%ld\n",
25721- atomic_long_read(&group->counter[cm_attr->index]));
25722+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25723 }
25724
25725 static const struct sysfs_ops cm_counter_ops = {
25726diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25727--- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25728+++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25729@@ -97,8 +97,8 @@ struct ib_fmr_pool {
25730
25731 struct task_struct *thread;
25732
25733- atomic_t req_ser;
25734- atomic_t flush_ser;
25735+ atomic_unchecked_t req_ser;
25736+ atomic_unchecked_t flush_ser;
25737
25738 wait_queue_head_t force_wait;
25739 };
25740@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25741 struct ib_fmr_pool *pool = pool_ptr;
25742
25743 do {
25744- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25745+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25746 ib_fmr_batch_release(pool);
25747
25748- atomic_inc(&pool->flush_ser);
25749+ atomic_inc_unchecked(&pool->flush_ser);
25750 wake_up_interruptible(&pool->force_wait);
25751
25752 if (pool->flush_function)
25753@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25754 }
25755
25756 set_current_state(TASK_INTERRUPTIBLE);
25757- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25758+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25759 !kthread_should_stop())
25760 schedule();
25761 __set_current_state(TASK_RUNNING);
25762@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25763 pool->dirty_watermark = params->dirty_watermark;
25764 pool->dirty_len = 0;
25765 spin_lock_init(&pool->pool_lock);
25766- atomic_set(&pool->req_ser, 0);
25767- atomic_set(&pool->flush_ser, 0);
25768+ atomic_set_unchecked(&pool->req_ser, 0);
25769+ atomic_set_unchecked(&pool->flush_ser, 0);
25770 init_waitqueue_head(&pool->force_wait);
25771
25772 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25773@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25774 }
25775 spin_unlock_irq(&pool->pool_lock);
25776
25777- serial = atomic_inc_return(&pool->req_ser);
25778+ serial = atomic_inc_return_unchecked(&pool->req_ser);
25779 wake_up_process(pool->thread);
25780
25781 if (wait_event_interruptible(pool->force_wait,
25782- atomic_read(&pool->flush_ser) - serial >= 0))
25783+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25784 return -EINTR;
25785
25786 return 0;
25787@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25788 } else {
25789 list_add_tail(&fmr->list, &pool->dirty_list);
25790 if (++pool->dirty_len >= pool->dirty_watermark) {
25791- atomic_inc(&pool->req_ser);
25792+ atomic_inc_unchecked(&pool->req_ser);
25793 wake_up_process(pool->thread);
25794 }
25795 }
25796diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25797--- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25798+++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25799@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25800 int err;
25801 struct fw_ri_tpte tpt;
25802 u32 stag_idx;
25803- static atomic_t key;
25804+ static atomic_unchecked_t key;
25805
25806 if (c4iw_fatal_error(rdev))
25807 return -EIO;
25808@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25809 &rdev->resource.tpt_fifo_lock);
25810 if (!stag_idx)
25811 return -ENOMEM;
25812- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25813+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25814 }
25815 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25816 __func__, stag_state, type, pdid, stag_idx);
25817diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25818--- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25819+++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25820@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25821 struct infinipath_counters counters;
25822 struct ipath_devdata *dd;
25823
25824+ pax_track_stack();
25825+
25826 dd = file->f_path.dentry->d_inode->i_private;
25827 dd->ipath_f_read_counters(dd, &counters);
25828
25829diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25830--- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25831+++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25832@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25833 struct ib_atomic_eth *ateth;
25834 struct ipath_ack_entry *e;
25835 u64 vaddr;
25836- atomic64_t *maddr;
25837+ atomic64_unchecked_t *maddr;
25838 u64 sdata;
25839 u32 rkey;
25840 u8 next;
25841@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25842 IB_ACCESS_REMOTE_ATOMIC)))
25843 goto nack_acc_unlck;
25844 /* Perform atomic OP and save result. */
25845- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25846+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25847 sdata = be64_to_cpu(ateth->swap_data);
25848 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25849 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25850- (u64) atomic64_add_return(sdata, maddr) - sdata :
25851+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25852 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25853 be64_to_cpu(ateth->compare_data),
25854 sdata);
25855diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25856--- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25857+++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25858@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25859 unsigned long flags;
25860 struct ib_wc wc;
25861 u64 sdata;
25862- atomic64_t *maddr;
25863+ atomic64_unchecked_t *maddr;
25864 enum ib_wc_status send_status;
25865
25866 /*
25867@@ -382,11 +382,11 @@ again:
25868 IB_ACCESS_REMOTE_ATOMIC)))
25869 goto acc_err;
25870 /* Perform atomic OP and save result. */
25871- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25872+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25873 sdata = wqe->wr.wr.atomic.compare_add;
25874 *(u64 *) sqp->s_sge.sge.vaddr =
25875 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25876- (u64) atomic64_add_return(sdata, maddr) - sdata :
25877+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25878 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25879 sdata, wqe->wr.wr.atomic.swap);
25880 goto send_comp;
25881diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25882--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25883+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25884@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25885 LIST_HEAD(nes_adapter_list);
25886 static LIST_HEAD(nes_dev_list);
25887
25888-atomic_t qps_destroyed;
25889+atomic_unchecked_t qps_destroyed;
25890
25891 static unsigned int ee_flsh_adapter;
25892 static unsigned int sysfs_nonidx_addr;
25893@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25894 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25895 struct nes_adapter *nesadapter = nesdev->nesadapter;
25896
25897- atomic_inc(&qps_destroyed);
25898+ atomic_inc_unchecked(&qps_destroyed);
25899
25900 /* Free the control structures */
25901
25902diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25903--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25904+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25905@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25906 u32 cm_packets_retrans;
25907 u32 cm_packets_created;
25908 u32 cm_packets_received;
25909-atomic_t cm_listens_created;
25910-atomic_t cm_listens_destroyed;
25911+atomic_unchecked_t cm_listens_created;
25912+atomic_unchecked_t cm_listens_destroyed;
25913 u32 cm_backlog_drops;
25914-atomic_t cm_loopbacks;
25915-atomic_t cm_nodes_created;
25916-atomic_t cm_nodes_destroyed;
25917-atomic_t cm_accel_dropped_pkts;
25918-atomic_t cm_resets_recvd;
25919+atomic_unchecked_t cm_loopbacks;
25920+atomic_unchecked_t cm_nodes_created;
25921+atomic_unchecked_t cm_nodes_destroyed;
25922+atomic_unchecked_t cm_accel_dropped_pkts;
25923+atomic_unchecked_t cm_resets_recvd;
25924
25925 static inline int mini_cm_accelerated(struct nes_cm_core *,
25926 struct nes_cm_node *);
25927@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25928
25929 static struct nes_cm_core *g_cm_core;
25930
25931-atomic_t cm_connects;
25932-atomic_t cm_accepts;
25933-atomic_t cm_disconnects;
25934-atomic_t cm_closes;
25935-atomic_t cm_connecteds;
25936-atomic_t cm_connect_reqs;
25937-atomic_t cm_rejects;
25938+atomic_unchecked_t cm_connects;
25939+atomic_unchecked_t cm_accepts;
25940+atomic_unchecked_t cm_disconnects;
25941+atomic_unchecked_t cm_closes;
25942+atomic_unchecked_t cm_connecteds;
25943+atomic_unchecked_t cm_connect_reqs;
25944+atomic_unchecked_t cm_rejects;
25945
25946
25947 /**
25948@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25949 kfree(listener);
25950 listener = NULL;
25951 ret = 0;
25952- atomic_inc(&cm_listens_destroyed);
25953+ atomic_inc_unchecked(&cm_listens_destroyed);
25954 } else {
25955 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25956 }
25957@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25958 cm_node->rem_mac);
25959
25960 add_hte_node(cm_core, cm_node);
25961- atomic_inc(&cm_nodes_created);
25962+ atomic_inc_unchecked(&cm_nodes_created);
25963
25964 return cm_node;
25965 }
25966@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25967 }
25968
25969 atomic_dec(&cm_core->node_cnt);
25970- atomic_inc(&cm_nodes_destroyed);
25971+ atomic_inc_unchecked(&cm_nodes_destroyed);
25972 nesqp = cm_node->nesqp;
25973 if (nesqp) {
25974 nesqp->cm_node = NULL;
25975@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25976
25977 static void drop_packet(struct sk_buff *skb)
25978 {
25979- atomic_inc(&cm_accel_dropped_pkts);
25980+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25981 dev_kfree_skb_any(skb);
25982 }
25983
25984@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25985 {
25986
25987 int reset = 0; /* whether to send reset in case of err.. */
25988- atomic_inc(&cm_resets_recvd);
25989+ atomic_inc_unchecked(&cm_resets_recvd);
25990 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25991 " refcnt=%d\n", cm_node, cm_node->state,
25992 atomic_read(&cm_node->ref_count));
25993@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25994 rem_ref_cm_node(cm_node->cm_core, cm_node);
25995 return NULL;
25996 }
25997- atomic_inc(&cm_loopbacks);
25998+ atomic_inc_unchecked(&cm_loopbacks);
25999 loopbackremotenode->loopbackpartner = cm_node;
26000 loopbackremotenode->tcp_cntxt.rcv_wscale =
26001 NES_CM_DEFAULT_RCV_WND_SCALE;
26002@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26003 add_ref_cm_node(cm_node);
26004 } else if (cm_node->state == NES_CM_STATE_TSA) {
26005 rem_ref_cm_node(cm_core, cm_node);
26006- atomic_inc(&cm_accel_dropped_pkts);
26007+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
26008 dev_kfree_skb_any(skb);
26009 break;
26010 }
26011@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26012
26013 if ((cm_id) && (cm_id->event_handler)) {
26014 if (issue_disconn) {
26015- atomic_inc(&cm_disconnects);
26016+ atomic_inc_unchecked(&cm_disconnects);
26017 cm_event.event = IW_CM_EVENT_DISCONNECT;
26018 cm_event.status = disconn_status;
26019 cm_event.local_addr = cm_id->local_addr;
26020@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26021 }
26022
26023 if (issue_close) {
26024- atomic_inc(&cm_closes);
26025+ atomic_inc_unchecked(&cm_closes);
26026 nes_disconnect(nesqp, 1);
26027
26028 cm_id->provider_data = nesqp;
26029@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26030
26031 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26032 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26033- atomic_inc(&cm_accepts);
26034+ atomic_inc_unchecked(&cm_accepts);
26035
26036 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26037 netdev_refcnt_read(nesvnic->netdev));
26038@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26039
26040 struct nes_cm_core *cm_core;
26041
26042- atomic_inc(&cm_rejects);
26043+ atomic_inc_unchecked(&cm_rejects);
26044 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26045 loopback = cm_node->loopbackpartner;
26046 cm_core = cm_node->cm_core;
26047@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26048 ntohl(cm_id->local_addr.sin_addr.s_addr),
26049 ntohs(cm_id->local_addr.sin_port));
26050
26051- atomic_inc(&cm_connects);
26052+ atomic_inc_unchecked(&cm_connects);
26053 nesqp->active_conn = 1;
26054
26055 /* cache the cm_id in the qp */
26056@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26057 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26058 return err;
26059 }
26060- atomic_inc(&cm_listens_created);
26061+ atomic_inc_unchecked(&cm_listens_created);
26062 }
26063
26064 cm_id->add_ref(cm_id);
26065@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26066 if (nesqp->destroyed) {
26067 return;
26068 }
26069- atomic_inc(&cm_connecteds);
26070+ atomic_inc_unchecked(&cm_connecteds);
26071 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26072 " local port 0x%04X. jiffies = %lu.\n",
26073 nesqp->hwqp.qp_id,
26074@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26075
26076 cm_id->add_ref(cm_id);
26077 ret = cm_id->event_handler(cm_id, &cm_event);
26078- atomic_inc(&cm_closes);
26079+ atomic_inc_unchecked(&cm_closes);
26080 cm_event.event = IW_CM_EVENT_CLOSE;
26081 cm_event.status = IW_CM_EVENT_STATUS_OK;
26082 cm_event.provider_data = cm_id->provider_data;
26083@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26084 return;
26085 cm_id = cm_node->cm_id;
26086
26087- atomic_inc(&cm_connect_reqs);
26088+ atomic_inc_unchecked(&cm_connect_reqs);
26089 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26090 cm_node, cm_id, jiffies);
26091
26092@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26093 return;
26094 cm_id = cm_node->cm_id;
26095
26096- atomic_inc(&cm_connect_reqs);
26097+ atomic_inc_unchecked(&cm_connect_reqs);
26098 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26099 cm_node, cm_id, jiffies);
26100
26101diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26102--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26103+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26104@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26105 extern unsigned int wqm_quanta;
26106 extern struct list_head nes_adapter_list;
26107
26108-extern atomic_t cm_connects;
26109-extern atomic_t cm_accepts;
26110-extern atomic_t cm_disconnects;
26111-extern atomic_t cm_closes;
26112-extern atomic_t cm_connecteds;
26113-extern atomic_t cm_connect_reqs;
26114-extern atomic_t cm_rejects;
26115-extern atomic_t mod_qp_timouts;
26116-extern atomic_t qps_created;
26117-extern atomic_t qps_destroyed;
26118-extern atomic_t sw_qps_destroyed;
26119+extern atomic_unchecked_t cm_connects;
26120+extern atomic_unchecked_t cm_accepts;
26121+extern atomic_unchecked_t cm_disconnects;
26122+extern atomic_unchecked_t cm_closes;
26123+extern atomic_unchecked_t cm_connecteds;
26124+extern atomic_unchecked_t cm_connect_reqs;
26125+extern atomic_unchecked_t cm_rejects;
26126+extern atomic_unchecked_t mod_qp_timouts;
26127+extern atomic_unchecked_t qps_created;
26128+extern atomic_unchecked_t qps_destroyed;
26129+extern atomic_unchecked_t sw_qps_destroyed;
26130 extern u32 mh_detected;
26131 extern u32 mh_pauses_sent;
26132 extern u32 cm_packets_sent;
26133@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26134 extern u32 cm_packets_received;
26135 extern u32 cm_packets_dropped;
26136 extern u32 cm_packets_retrans;
26137-extern atomic_t cm_listens_created;
26138-extern atomic_t cm_listens_destroyed;
26139+extern atomic_unchecked_t cm_listens_created;
26140+extern atomic_unchecked_t cm_listens_destroyed;
26141 extern u32 cm_backlog_drops;
26142-extern atomic_t cm_loopbacks;
26143-extern atomic_t cm_nodes_created;
26144-extern atomic_t cm_nodes_destroyed;
26145-extern atomic_t cm_accel_dropped_pkts;
26146-extern atomic_t cm_resets_recvd;
26147+extern atomic_unchecked_t cm_loopbacks;
26148+extern atomic_unchecked_t cm_nodes_created;
26149+extern atomic_unchecked_t cm_nodes_destroyed;
26150+extern atomic_unchecked_t cm_accel_dropped_pkts;
26151+extern atomic_unchecked_t cm_resets_recvd;
26152
26153 extern u32 int_mod_timer_init;
26154 extern u32 int_mod_cq_depth_256;
26155diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26156--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26157+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26158@@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26159 target_stat_values[++index] = mh_detected;
26160 target_stat_values[++index] = mh_pauses_sent;
26161 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26162- target_stat_values[++index] = atomic_read(&cm_connects);
26163- target_stat_values[++index] = atomic_read(&cm_accepts);
26164- target_stat_values[++index] = atomic_read(&cm_disconnects);
26165- target_stat_values[++index] = atomic_read(&cm_connecteds);
26166- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26167- target_stat_values[++index] = atomic_read(&cm_rejects);
26168- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26169- target_stat_values[++index] = atomic_read(&qps_created);
26170- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26171- target_stat_values[++index] = atomic_read(&qps_destroyed);
26172- target_stat_values[++index] = atomic_read(&cm_closes);
26173+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26174+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26175+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26176+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26177+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26178+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26179+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26180+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26181+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26182+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26183+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26184 target_stat_values[++index] = cm_packets_sent;
26185 target_stat_values[++index] = cm_packets_bounced;
26186 target_stat_values[++index] = cm_packets_created;
26187 target_stat_values[++index] = cm_packets_received;
26188 target_stat_values[++index] = cm_packets_dropped;
26189 target_stat_values[++index] = cm_packets_retrans;
26190- target_stat_values[++index] = atomic_read(&cm_listens_created);
26191- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26192+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26193+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26194 target_stat_values[++index] = cm_backlog_drops;
26195- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26196- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26197- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26198- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26199- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26200+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26201+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26202+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26203+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26204+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26205 target_stat_values[++index] = nesadapter->free_4kpbl;
26206 target_stat_values[++index] = nesadapter->free_256pbl;
26207 target_stat_values[++index] = int_mod_timer_init;
26208diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26209--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26210+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26211@@ -46,9 +46,9 @@
26212
26213 #include <rdma/ib_umem.h>
26214
26215-atomic_t mod_qp_timouts;
26216-atomic_t qps_created;
26217-atomic_t sw_qps_destroyed;
26218+atomic_unchecked_t mod_qp_timouts;
26219+atomic_unchecked_t qps_created;
26220+atomic_unchecked_t sw_qps_destroyed;
26221
26222 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26223
26224@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26225 if (init_attr->create_flags)
26226 return ERR_PTR(-EINVAL);
26227
26228- atomic_inc(&qps_created);
26229+ atomic_inc_unchecked(&qps_created);
26230 switch (init_attr->qp_type) {
26231 case IB_QPT_RC:
26232 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26233@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26234 struct iw_cm_event cm_event;
26235 int ret;
26236
26237- atomic_inc(&sw_qps_destroyed);
26238+ atomic_inc_unchecked(&sw_qps_destroyed);
26239 nesqp->destroyed = 1;
26240
26241 /* Blow away the connection if it exists. */
26242diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26243--- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26244+++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26245@@ -51,6 +51,7 @@
26246 #include <linux/completion.h>
26247 #include <linux/kref.h>
26248 #include <linux/sched.h>
26249+#include <linux/slab.h>
26250
26251 #include "qib_common.h"
26252 #include "qib_verbs.h"
26253diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26254--- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26255+++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26256@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26257 */
26258 static void gameport_init_port(struct gameport *gameport)
26259 {
26260- static atomic_t gameport_no = ATOMIC_INIT(0);
26261+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26262
26263 __module_get(THIS_MODULE);
26264
26265 mutex_init(&gameport->drv_mutex);
26266 device_initialize(&gameport->dev);
26267 dev_set_name(&gameport->dev, "gameport%lu",
26268- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26269+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26270 gameport->dev.bus = &gameport_bus;
26271 gameport->dev.release = gameport_release_port;
26272 if (gameport->parent)
26273diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26274--- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26275+++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26276@@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26277 */
26278 int input_register_device(struct input_dev *dev)
26279 {
26280- static atomic_t input_no = ATOMIC_INIT(0);
26281+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26282 struct input_handler *handler;
26283 const char *path;
26284 int error;
26285@@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26286 dev->setkeycode = input_default_setkeycode;
26287
26288 dev_set_name(&dev->dev, "input%ld",
26289- (unsigned long) atomic_inc_return(&input_no) - 1);
26290+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26291
26292 error = device_add(&dev->dev);
26293 if (error)
26294diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26295--- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26296+++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26297@@ -30,6 +30,7 @@
26298 #include <linux/kernel.h>
26299 #include <linux/module.h>
26300 #include <linux/slab.h>
26301+#include <linux/sched.h>
26302 #include <linux/init.h>
26303 #include <linux/input.h>
26304 #include <linux/gameport.h>
26305@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26306 unsigned char buf[SW_LENGTH];
26307 int i;
26308
26309+ pax_track_stack();
26310+
26311 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26312
26313 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26314diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26315--- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26316+++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26317@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26318
26319 static int xpad_led_probe(struct usb_xpad *xpad)
26320 {
26321- static atomic_t led_seq = ATOMIC_INIT(0);
26322+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26323 long led_no;
26324 struct xpad_led *led;
26325 struct led_classdev *led_cdev;
26326@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26327 if (!led)
26328 return -ENOMEM;
26329
26330- led_no = (long)atomic_inc_return(&led_seq) - 1;
26331+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26332
26333 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26334 led->xpad = xpad;
26335diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26336--- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26337+++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26338@@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26339
26340 spin_unlock_irq(&client->packet_lock);
26341
26342- if (copy_to_user(buffer, data, count))
26343+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26344 return -EFAULT;
26345
26346 return count;
26347diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26348--- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26349+++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26350@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26351 */
26352 static void serio_init_port(struct serio *serio)
26353 {
26354- static atomic_t serio_no = ATOMIC_INIT(0);
26355+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26356
26357 __module_get(THIS_MODULE);
26358
26359@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26360 mutex_init(&serio->drv_mutex);
26361 device_initialize(&serio->dev);
26362 dev_set_name(&serio->dev, "serio%ld",
26363- (long)atomic_inc_return(&serio_no) - 1);
26364+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26365 serio->dev.bus = &serio_bus;
26366 serio->dev.release = serio_release_port;
26367 serio->dev.groups = serio_device_attr_groups;
26368diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26369--- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26370+++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26371@@ -89,8 +89,8 @@ struct capiminor {
26372
26373 struct capi20_appl *ap;
26374 u32 ncci;
26375- atomic_t datahandle;
26376- atomic_t msgid;
26377+ atomic_unchecked_t datahandle;
26378+ atomic_unchecked_t msgid;
26379
26380 struct tty_port port;
26381 int ttyinstop;
26382@@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26383 capimsg_setu16(s, 2, mp->ap->applid);
26384 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26385 capimsg_setu8 (s, 5, CAPI_RESP);
26386- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26387+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26388 capimsg_setu32(s, 8, mp->ncci);
26389 capimsg_setu16(s, 12, datahandle);
26390 }
26391@@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26392 mp->outbytes -= len;
26393 spin_unlock_bh(&mp->outlock);
26394
26395- datahandle = atomic_inc_return(&mp->datahandle);
26396+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26397 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26398 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26399 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26400 capimsg_setu16(skb->data, 2, mp->ap->applid);
26401 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26402 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26403- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26404+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26405 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26406 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26407 capimsg_setu16(skb->data, 16, len); /* Data length */
26408diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26409--- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26410+++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26411@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26412 cs->commands_pending = 0;
26413 cs->cur_at_seq = 0;
26414 cs->gotfwver = -1;
26415- cs->open_count = 0;
26416+ local_set(&cs->open_count, 0);
26417 cs->dev = NULL;
26418 cs->tty = NULL;
26419 cs->tty_dev = NULL;
26420diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26421--- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26422+++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26423@@ -35,6 +35,7 @@
26424 #include <linux/tty_driver.h>
26425 #include <linux/list.h>
26426 #include <asm/atomic.h>
26427+#include <asm/local.h>
26428
26429 #define GIG_VERSION {0, 5, 0, 0}
26430 #define GIG_COMPAT {0, 4, 0, 0}
26431@@ -433,7 +434,7 @@ struct cardstate {
26432 spinlock_t cmdlock;
26433 unsigned curlen, cmdbytes;
26434
26435- unsigned open_count;
26436+ local_t open_count;
26437 struct tty_struct *tty;
26438 struct tasklet_struct if_wake_tasklet;
26439 unsigned control_state;
26440diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26441--- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26442+++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26443@@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26444 return -ERESTARTSYS;
26445 tty->driver_data = cs;
26446
26447- ++cs->open_count;
26448-
26449- if (cs->open_count == 1) {
26450+ if (local_inc_return(&cs->open_count) == 1) {
26451 spin_lock_irqsave(&cs->lock, flags);
26452 cs->tty = tty;
26453 spin_unlock_irqrestore(&cs->lock, flags);
26454@@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26455
26456 if (!cs->connected)
26457 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26458- else if (!cs->open_count)
26459+ else if (!local_read(&cs->open_count))
26460 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26461 else {
26462- if (!--cs->open_count) {
26463+ if (!local_dec_return(&cs->open_count)) {
26464 spin_lock_irqsave(&cs->lock, flags);
26465 cs->tty = NULL;
26466 spin_unlock_irqrestore(&cs->lock, flags);
26467@@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26468 if (!cs->connected) {
26469 gig_dbg(DEBUG_IF, "not connected");
26470 retval = -ENODEV;
26471- } else if (!cs->open_count)
26472+ } else if (!local_read(&cs->open_count))
26473 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26474 else {
26475 retval = 0;
26476@@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26477 retval = -ENODEV;
26478 goto done;
26479 }
26480- if (!cs->open_count) {
26481+ if (!local_read(&cs->open_count)) {
26482 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26483 retval = -ENODEV;
26484 goto done;
26485@@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26486 if (!cs->connected) {
26487 gig_dbg(DEBUG_IF, "not connected");
26488 retval = -ENODEV;
26489- } else if (!cs->open_count)
26490+ } else if (!local_read(&cs->open_count))
26491 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26492 else if (cs->mstate != MS_LOCKED) {
26493 dev_warn(cs->dev, "can't write to unlocked device\n");
26494@@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26495
26496 if (!cs->connected)
26497 gig_dbg(DEBUG_IF, "not connected");
26498- else if (!cs->open_count)
26499+ else if (!local_read(&cs->open_count))
26500 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26501 else if (cs->mstate != MS_LOCKED)
26502 dev_warn(cs->dev, "can't write to unlocked device\n");
26503@@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26504
26505 if (!cs->connected)
26506 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26507- else if (!cs->open_count)
26508+ else if (!local_read(&cs->open_count))
26509 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26510 else
26511 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26512@@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26513
26514 if (!cs->connected)
26515 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26516- else if (!cs->open_count)
26517+ else if (!local_read(&cs->open_count))
26518 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26519 else
26520 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26521@@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26522 goto out;
26523 }
26524
26525- if (!cs->open_count) {
26526+ if (!local_read(&cs->open_count)) {
26527 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26528 goto out;
26529 }
26530diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26531--- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26532+++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26533@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26534 }
26535 if (left) {
26536 if (t4file->user) {
26537- if (copy_from_user(buf, dp, left))
26538+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26539 return -EFAULT;
26540 } else {
26541 memcpy(buf, dp, left);
26542@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26543 }
26544 if (left) {
26545 if (config->user) {
26546- if (copy_from_user(buf, dp, left))
26547+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26548 return -EFAULT;
26549 } else {
26550 memcpy(buf, dp, left);
26551diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26552--- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26553+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26554@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26555 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26556 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26557
26558+ pax_track_stack();
26559
26560 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26561 {
26562diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26563--- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26564+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26565@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26566 IDI_SYNC_REQ req;
26567 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26568
26569+ pax_track_stack();
26570+
26571 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26572
26573 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26574diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26575--- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26576+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26577@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26578 IDI_SYNC_REQ req;
26579 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26580
26581+ pax_track_stack();
26582+
26583 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26584
26585 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26586diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26587--- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26588+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26589@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26590 IDI_SYNC_REQ req;
26591 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26592
26593+ pax_track_stack();
26594+
26595 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26596
26597 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26598diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26599--- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26600+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26601@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26602 } diva_didd_add_adapter_t;
26603 typedef struct _diva_didd_remove_adapter {
26604 IDI_CALL p_request;
26605-} diva_didd_remove_adapter_t;
26606+} __no_const diva_didd_remove_adapter_t;
26607 typedef struct _diva_didd_read_adapter_array {
26608 void * buffer;
26609 dword length;
26610diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26611--- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26612+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26613@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26614 IDI_SYNC_REQ req;
26615 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26616
26617+ pax_track_stack();
26618+
26619 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26620
26621 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26622diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26623--- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26624+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26625@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26626 dword d;
26627 word w;
26628
26629+ pax_track_stack();
26630+
26631 a = plci->adapter;
26632 Id = ((word)plci->Id<<8)|a->Id;
26633 PUT_WORD(&SS_Ind[4],0x0000);
26634@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26635 word j, n, w;
26636 dword d;
26637
26638+ pax_track_stack();
26639+
26640
26641 for(i=0;i<8;i++) bp_parms[i].length = 0;
26642 for(i=0;i<2;i++) global_config[i].length = 0;
26643@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26644 const byte llc3[] = {4,3,2,2,6,6,0};
26645 const byte header[] = {0,2,3,3,0,0,0};
26646
26647+ pax_track_stack();
26648+
26649 for(i=0;i<8;i++) bp_parms[i].length = 0;
26650 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26651 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26652@@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26653 word appl_number_group_type[MAX_APPL];
26654 PLCI *auxplci;
26655
26656+ pax_track_stack();
26657+
26658 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26659
26660 if(!a->group_optimization_enabled)
26661diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26662--- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26663+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26664@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26665 IDI_SYNC_REQ req;
26666 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26667
26668+ pax_track_stack();
26669+
26670 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26671
26672 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26673diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26674--- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26675+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26676@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26677 typedef struct _diva_os_idi_adapter_interface {
26678 diva_init_card_proc_t cleanup_adapter_proc;
26679 diva_cmd_card_proc_t cmd_proc;
26680-} diva_os_idi_adapter_interface_t;
26681+} __no_const diva_os_idi_adapter_interface_t;
26682
26683 typedef struct _diva_os_xdi_adapter {
26684 struct list_head link;
26685diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26686--- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26687+++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26688@@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26689 } iocpar;
26690 void __user *argp = (void __user *)arg;
26691
26692+ pax_track_stack();
26693+
26694 #define name iocpar.name
26695 #define bname iocpar.bname
26696 #define iocts iocpar.iocts
26697diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26698--- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26699+++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26700@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26701 if (count > len)
26702 count = len;
26703 if (user) {
26704- if (copy_from_user(msg, buf, count))
26705+ if (count > sizeof msg || copy_from_user(msg, buf, count))
26706 return -EFAULT;
26707 } else
26708 memcpy(msg, buf, count);
26709diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26710--- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26711+++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26712@@ -92,9 +92,17 @@ static __init int map_switcher(void)
26713 * it's worked so far. The end address needs +1 because __get_vm_area
26714 * allocates an extra guard page, so we need space for that.
26715 */
26716+
26717+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26718+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26719+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26720+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26721+#else
26722 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26723 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26724 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26725+#endif
26726+
26727 if (!switcher_vma) {
26728 err = -ENOMEM;
26729 printk("lguest: could not map switcher pages high\n");
26730@@ -119,7 +127,7 @@ static __init int map_switcher(void)
26731 * Now the Switcher is mapped at the right address, we can't fail!
26732 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26733 */
26734- memcpy(switcher_vma->addr, start_switcher_text,
26735+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26736 end_switcher_text - start_switcher_text);
26737
26738 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26739diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26740--- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26741+++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26742@@ -59,7 +59,7 @@ static struct {
26743 /* Offset from where switcher.S was compiled to where we've copied it */
26744 static unsigned long switcher_offset(void)
26745 {
26746- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26747+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26748 }
26749
26750 /* This cpu's struct lguest_pages. */
26751@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26752 * These copies are pretty cheap, so we do them unconditionally: */
26753 /* Save the current Host top-level page directory.
26754 */
26755+
26756+#ifdef CONFIG_PAX_PER_CPU_PGD
26757+ pages->state.host_cr3 = read_cr3();
26758+#else
26759 pages->state.host_cr3 = __pa(current->mm->pgd);
26760+#endif
26761+
26762 /*
26763 * Set up the Guest's page tables to see this CPU's pages (and no
26764 * other CPU's pages).
26765@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26766 * compiled-in switcher code and the high-mapped copy we just made.
26767 */
26768 for (i = 0; i < IDT_ENTRIES; i++)
26769- default_idt_entries[i] += switcher_offset();
26770+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26771
26772 /*
26773 * Set up the Switcher's per-cpu areas.
26774@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26775 * it will be undisturbed when we switch. To change %cs and jump we
26776 * need this structure to feed to Intel's "lcall" instruction.
26777 */
26778- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26779+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26780 lguest_entry.segment = LGUEST_CS;
26781
26782 /*
26783diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26784--- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26785+++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26786@@ -87,6 +87,7 @@
26787 #include <asm/page.h>
26788 #include <asm/segment.h>
26789 #include <asm/lguest.h>
26790+#include <asm/processor-flags.h>
26791
26792 // We mark the start of the code to copy
26793 // It's placed in .text tho it's never run here
26794@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26795 // Changes type when we load it: damn Intel!
26796 // For after we switch over our page tables
26797 // That entry will be read-only: we'd crash.
26798+
26799+#ifdef CONFIG_PAX_KERNEXEC
26800+ mov %cr0, %edx
26801+ xor $X86_CR0_WP, %edx
26802+ mov %edx, %cr0
26803+#endif
26804+
26805 movl $(GDT_ENTRY_TSS*8), %edx
26806 ltr %dx
26807
26808@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26809 // Let's clear it again for our return.
26810 // The GDT descriptor of the Host
26811 // Points to the table after two "size" bytes
26812- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26813+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26814 // Clear "used" from type field (byte 5, bit 2)
26815- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26816+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26817+
26818+#ifdef CONFIG_PAX_KERNEXEC
26819+ mov %cr0, %eax
26820+ xor $X86_CR0_WP, %eax
26821+ mov %eax, %cr0
26822+#endif
26823
26824 // Once our page table's switched, the Guest is live!
26825 // The Host fades as we run this final step.
26826@@ -295,13 +309,12 @@ deliver_to_host:
26827 // I consulted gcc, and it gave
26828 // These instructions, which I gladly credit:
26829 leal (%edx,%ebx,8), %eax
26830- movzwl (%eax),%edx
26831- movl 4(%eax), %eax
26832- xorw %ax, %ax
26833- orl %eax, %edx
26834+ movl 4(%eax), %edx
26835+ movw (%eax), %dx
26836 // Now the address of the handler's in %edx
26837 // We call it now: its "iret" drops us home.
26838- jmp *%edx
26839+ ljmp $__KERNEL_CS, $1f
26840+1: jmp *%edx
26841
26842 // Every interrupt can come to us here
26843 // But we must truly tell each apart.
26844diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26845--- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26846+++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26847@@ -162,9 +162,9 @@ struct mapped_device {
26848 /*
26849 * Event handling.
26850 */
26851- atomic_t event_nr;
26852+ atomic_unchecked_t event_nr;
26853 wait_queue_head_t eventq;
26854- atomic_t uevent_seq;
26855+ atomic_unchecked_t uevent_seq;
26856 struct list_head uevent_list;
26857 spinlock_t uevent_lock; /* Protect access to uevent_list */
26858
26859@@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26860 rwlock_init(&md->map_lock);
26861 atomic_set(&md->holders, 1);
26862 atomic_set(&md->open_count, 0);
26863- atomic_set(&md->event_nr, 0);
26864- atomic_set(&md->uevent_seq, 0);
26865+ atomic_set_unchecked(&md->event_nr, 0);
26866+ atomic_set_unchecked(&md->uevent_seq, 0);
26867 INIT_LIST_HEAD(&md->uevent_list);
26868 spin_lock_init(&md->uevent_lock);
26869
26870@@ -1971,7 +1971,7 @@ static void event_callback(void *context
26871
26872 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26873
26874- atomic_inc(&md->event_nr);
26875+ atomic_inc_unchecked(&md->event_nr);
26876 wake_up(&md->eventq);
26877 }
26878
26879@@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26880
26881 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26882 {
26883- return atomic_add_return(1, &md->uevent_seq);
26884+ return atomic_add_return_unchecked(1, &md->uevent_seq);
26885 }
26886
26887 uint32_t dm_get_event_nr(struct mapped_device *md)
26888 {
26889- return atomic_read(&md->event_nr);
26890+ return atomic_read_unchecked(&md->event_nr);
26891 }
26892
26893 int dm_wait_event(struct mapped_device *md, int event_nr)
26894 {
26895 return wait_event_interruptible(md->eventq,
26896- (event_nr != atomic_read(&md->event_nr)));
26897+ (event_nr != atomic_read_unchecked(&md->event_nr)));
26898 }
26899
26900 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26901diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26902--- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26903+++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26904@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26905 cmd == DM_LIST_VERSIONS_CMD)
26906 return 0;
26907
26908- if ((cmd == DM_DEV_CREATE_CMD)) {
26909+ if (cmd == DM_DEV_CREATE_CMD) {
26910 if (!*param->name) {
26911 DMWARN("name not supplied when creating device");
26912 return -EINVAL;
26913diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26914--- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26915+++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26916@@ -42,7 +42,7 @@ enum dm_raid1_error {
26917
26918 struct mirror {
26919 struct mirror_set *ms;
26920- atomic_t error_count;
26921+ atomic_unchecked_t error_count;
26922 unsigned long error_type;
26923 struct dm_dev *dev;
26924 sector_t offset;
26925@@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26926 struct mirror *m;
26927
26928 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26929- if (!atomic_read(&m->error_count))
26930+ if (!atomic_read_unchecked(&m->error_count))
26931 return m;
26932
26933 return NULL;
26934@@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26935 * simple way to tell if a device has encountered
26936 * errors.
26937 */
26938- atomic_inc(&m->error_count);
26939+ atomic_inc_unchecked(&m->error_count);
26940
26941 if (test_and_set_bit(error_type, &m->error_type))
26942 return;
26943@@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26944 struct mirror *m = get_default_mirror(ms);
26945
26946 do {
26947- if (likely(!atomic_read(&m->error_count)))
26948+ if (likely(!atomic_read_unchecked(&m->error_count)))
26949 return m;
26950
26951 if (m-- == ms->mirror)
26952@@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26953 {
26954 struct mirror *default_mirror = get_default_mirror(m->ms);
26955
26956- return !atomic_read(&default_mirror->error_count);
26957+ return !atomic_read_unchecked(&default_mirror->error_count);
26958 }
26959
26960 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26961@@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
26962 */
26963 if (likely(region_in_sync(ms, region, 1)))
26964 m = choose_mirror(ms, bio->bi_sector);
26965- else if (m && atomic_read(&m->error_count))
26966+ else if (m && atomic_read_unchecked(&m->error_count))
26967 m = NULL;
26968
26969 if (likely(m))
26970@@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
26971 }
26972
26973 ms->mirror[mirror].ms = ms;
26974- atomic_set(&(ms->mirror[mirror].error_count), 0);
26975+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26976 ms->mirror[mirror].error_type = 0;
26977 ms->mirror[mirror].offset = offset;
26978
26979@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26980 */
26981 static char device_status_char(struct mirror *m)
26982 {
26983- if (!atomic_read(&(m->error_count)))
26984+ if (!atomic_read_unchecked(&(m->error_count)))
26985 return 'A';
26986
26987 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26988diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
26989--- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
26990+++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
26991@@ -20,7 +20,7 @@ struct stripe {
26992 struct dm_dev *dev;
26993 sector_t physical_start;
26994
26995- atomic_t error_count;
26996+ atomic_unchecked_t error_count;
26997 };
26998
26999 struct stripe_c {
27000@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27001 kfree(sc);
27002 return r;
27003 }
27004- atomic_set(&(sc->stripe[i].error_count), 0);
27005+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27006 }
27007
27008 ti->private = sc;
27009@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27010 DMEMIT("%d ", sc->stripes);
27011 for (i = 0; i < sc->stripes; i++) {
27012 DMEMIT("%s ", sc->stripe[i].dev->name);
27013- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27014+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27015 'D' : 'A';
27016 }
27017 buffer[i] = '\0';
27018@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27019 */
27020 for (i = 0; i < sc->stripes; i++)
27021 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27022- atomic_inc(&(sc->stripe[i].error_count));
27023- if (atomic_read(&(sc->stripe[i].error_count)) <
27024+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
27025+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27026 DM_IO_ERROR_THRESHOLD)
27027 schedule_work(&sc->trigger_event);
27028 }
27029diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27030--- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27031+++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27032@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27033 if (!dev_size)
27034 return 0;
27035
27036- if ((start >= dev_size) || (start + len > dev_size)) {
27037+ if ((start >= dev_size) || (len > dev_size - start)) {
27038 DMWARN("%s: %s too small for target: "
27039 "start=%llu, len=%llu, dev_size=%llu",
27040 dm_device_name(ti->table->md), bdevname(bdev, b),
27041diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27042--- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27043+++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27044@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27045 * start build, activate spare
27046 */
27047 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27048-static atomic_t md_event_count;
27049+static atomic_unchecked_t md_event_count;
27050 void md_new_event(mddev_t *mddev)
27051 {
27052- atomic_inc(&md_event_count);
27053+ atomic_inc_unchecked(&md_event_count);
27054 wake_up(&md_event_waiters);
27055 }
27056 EXPORT_SYMBOL_GPL(md_new_event);
27057@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27058 */
27059 static void md_new_event_inintr(mddev_t *mddev)
27060 {
27061- atomic_inc(&md_event_count);
27062+ atomic_inc_unchecked(&md_event_count);
27063 wake_up(&md_event_waiters);
27064 }
27065
27066@@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27067
27068 rdev->preferred_minor = 0xffff;
27069 rdev->data_offset = le64_to_cpu(sb->data_offset);
27070- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27071+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27072
27073 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27074 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27075@@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27076 else
27077 sb->resync_offset = cpu_to_le64(0);
27078
27079- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27080+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27081
27082 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27083 sb->size = cpu_to_le64(mddev->dev_sectors);
27084@@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27085 static ssize_t
27086 errors_show(mdk_rdev_t *rdev, char *page)
27087 {
27088- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27089+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27090 }
27091
27092 static ssize_t
27093@@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27094 char *e;
27095 unsigned long n = simple_strtoul(buf, &e, 10);
27096 if (*buf && (*e == 0 || *e == '\n')) {
27097- atomic_set(&rdev->corrected_errors, n);
27098+ atomic_set_unchecked(&rdev->corrected_errors, n);
27099 return len;
27100 }
27101 return -EINVAL;
27102@@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27103 rdev->last_read_error.tv_sec = 0;
27104 rdev->last_read_error.tv_nsec = 0;
27105 atomic_set(&rdev->nr_pending, 0);
27106- atomic_set(&rdev->read_errors, 0);
27107- atomic_set(&rdev->corrected_errors, 0);
27108+ atomic_set_unchecked(&rdev->read_errors, 0);
27109+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27110
27111 INIT_LIST_HEAD(&rdev->same_set);
27112 init_waitqueue_head(&rdev->blocked_wait);
27113@@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27114
27115 spin_unlock(&pers_lock);
27116 seq_printf(seq, "\n");
27117- mi->event = atomic_read(&md_event_count);
27118+ mi->event = atomic_read_unchecked(&md_event_count);
27119 return 0;
27120 }
27121 if (v == (void*)2) {
27122@@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27123 chunk_kb ? "KB" : "B");
27124 if (bitmap->file) {
27125 seq_printf(seq, ", file: ");
27126- seq_path(seq, &bitmap->file->f_path, " \t\n");
27127+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27128 }
27129
27130 seq_printf(seq, "\n");
27131@@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27132 else {
27133 struct seq_file *p = file->private_data;
27134 p->private = mi;
27135- mi->event = atomic_read(&md_event_count);
27136+ mi->event = atomic_read_unchecked(&md_event_count);
27137 }
27138 return error;
27139 }
27140@@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27141 /* always allow read */
27142 mask = POLLIN | POLLRDNORM;
27143
27144- if (mi->event != atomic_read(&md_event_count))
27145+ if (mi->event != atomic_read_unchecked(&md_event_count))
27146 mask |= POLLERR | POLLPRI;
27147 return mask;
27148 }
27149@@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27150 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27151 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27152 (int)part_stat_read(&disk->part0, sectors[1]) -
27153- atomic_read(&disk->sync_io);
27154+ atomic_read_unchecked(&disk->sync_io);
27155 /* sync IO will cause sync_io to increase before the disk_stats
27156 * as sync_io is counted when a request starts, and
27157 * disk_stats is counted when it completes.
27158diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27159--- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27160+++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27161@@ -97,13 +97,13 @@ struct mdk_rdev_s
27162 * only maintained for arrays that
27163 * support hot removal
27164 */
27165- atomic_t read_errors; /* number of consecutive read errors that
27166+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27167 * we have tried to ignore.
27168 */
27169 struct timespec last_read_error; /* monotonic time since our
27170 * last read error
27171 */
27172- atomic_t corrected_errors; /* number of corrected read errors,
27173+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27174 * for reporting to userspace and storing
27175 * in superblock.
27176 */
27177@@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27178
27179 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27180 {
27181- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27182+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27183 }
27184
27185 struct mdk_personality
27186diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27187--- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27188+++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27189@@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27190 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27191 set_bit(R10BIO_Uptodate, &r10_bio->state);
27192 else {
27193- atomic_add(r10_bio->sectors,
27194+ atomic_add_unchecked(r10_bio->sectors,
27195 &conf->mirrors[d].rdev->corrected_errors);
27196 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27197 md_error(r10_bio->mddev,
27198@@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27199 {
27200 struct timespec cur_time_mon;
27201 unsigned long hours_since_last;
27202- unsigned int read_errors = atomic_read(&rdev->read_errors);
27203+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27204
27205 ktime_get_ts(&cur_time_mon);
27206
27207@@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27208 * overflowing the shift of read_errors by hours_since_last.
27209 */
27210 if (hours_since_last >= 8 * sizeof(read_errors))
27211- atomic_set(&rdev->read_errors, 0);
27212+ atomic_set_unchecked(&rdev->read_errors, 0);
27213 else
27214- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27215+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27216 }
27217
27218 /*
27219@@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27220 }
27221
27222 check_decay_read_errors(mddev, rdev);
27223- atomic_inc(&rdev->read_errors);
27224- cur_read_error_count = atomic_read(&rdev->read_errors);
27225+ atomic_inc_unchecked(&rdev->read_errors);
27226+ cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27227 if (cur_read_error_count > max_read_errors) {
27228 rcu_read_unlock();
27229 printk(KERN_NOTICE
27230@@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27231 test_bit(In_sync, &rdev->flags)) {
27232 atomic_inc(&rdev->nr_pending);
27233 rcu_read_unlock();
27234- atomic_add(s, &rdev->corrected_errors);
27235+ atomic_add_unchecked(s, &rdev->corrected_errors);
27236 if (sync_page_io(rdev,
27237 r10_bio->devs[sl].addr +
27238 sect,
27239diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27240--- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27241+++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27242@@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27243 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27244 continue;
27245 rdev = conf->mirrors[d].rdev;
27246- atomic_add(s, &rdev->corrected_errors);
27247+ atomic_add_unchecked(s, &rdev->corrected_errors);
27248 if (sync_page_io(rdev,
27249 sect,
27250 s<<9,
27251@@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27252 /* Well, this device is dead */
27253 md_error(mddev, rdev);
27254 else {
27255- atomic_add(s, &rdev->corrected_errors);
27256+ atomic_add_unchecked(s, &rdev->corrected_errors);
27257 printk(KERN_INFO
27258 "md/raid1:%s: read error corrected "
27259 "(%d sectors at %llu on %s)\n",
27260diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27261--- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27262+++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27263@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27264 bi->bi_next = NULL;
27265 if ((rw & WRITE) &&
27266 test_bit(R5_ReWrite, &sh->dev[i].flags))
27267- atomic_add(STRIPE_SECTORS,
27268+ atomic_add_unchecked(STRIPE_SECTORS,
27269 &rdev->corrected_errors);
27270 generic_make_request(bi);
27271 } else {
27272@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27273 clear_bit(R5_ReadError, &sh->dev[i].flags);
27274 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27275 }
27276- if (atomic_read(&conf->disks[i].rdev->read_errors))
27277- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27278+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27279+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27280 } else {
27281 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27282 int retry = 0;
27283 rdev = conf->disks[i].rdev;
27284
27285 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27286- atomic_inc(&rdev->read_errors);
27287+ atomic_inc_unchecked(&rdev->read_errors);
27288 if (conf->mddev->degraded >= conf->max_degraded)
27289 printk_rl(KERN_WARNING
27290 "md/raid:%s: read error not correctable "
27291@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27292 (unsigned long long)(sh->sector
27293 + rdev->data_offset),
27294 bdn);
27295- else if (atomic_read(&rdev->read_errors)
27296+ else if (atomic_read_unchecked(&rdev->read_errors)
27297 > conf->max_nr_stripes)
27298 printk(KERN_WARNING
27299 "md/raid:%s: Too many read errors, failing device %s.\n",
27300@@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27301 sector_t r_sector;
27302 struct stripe_head sh2;
27303
27304+ pax_track_stack();
27305
27306 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27307 stripe = new_sector;
27308diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27309--- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27310+++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27311@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27312
27313 int x[32], y[32], w[32], h[32];
27314
27315+ pax_track_stack();
27316+
27317 /* clear out memory */
27318 memset(&line_list[0], 0x00, sizeof(u32)*32);
27319 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27320diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27321--- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27322+++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27323@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27324 u8 buf[HOST_LINK_BUF_SIZE];
27325 int i;
27326
27327+ pax_track_stack();
27328+
27329 dprintk("%s\n", __func__);
27330
27331 /* check if we have space for a link buf in the rx_buffer */
27332@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27333 unsigned long timeout;
27334 int written;
27335
27336+ pax_track_stack();
27337+
27338 dprintk("%s\n", __func__);
27339
27340 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27341diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27342--- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27343+++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27344@@ -73,7 +73,7 @@ struct dvb_demux_feed {
27345 union {
27346 dmx_ts_cb ts;
27347 dmx_section_cb sec;
27348- } cb;
27349+ } __no_const cb;
27350
27351 struct dvb_demux *demux;
27352 void *priv;
27353diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27354--- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27355+++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27356@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27357 const struct dvb_device *template, void *priv, int type)
27358 {
27359 struct dvb_device *dvbdev;
27360- struct file_operations *dvbdevfops;
27361+ file_operations_no_const *dvbdevfops;
27362 struct device *clsdev;
27363 int minor;
27364 int id;
27365diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27366--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27367+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27368@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27369 struct dib0700_adapter_state {
27370 int (*set_param_save) (struct dvb_frontend *,
27371 struct dvb_frontend_parameters *);
27372-};
27373+} __no_const;
27374
27375 static int dib7070_set_param_override(struct dvb_frontend *fe,
27376 struct dvb_frontend_parameters *fep)
27377diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27378--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27379+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27380@@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27381
27382 u8 buf[260];
27383
27384+ pax_track_stack();
27385+
27386 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27387 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27388 hx.addr, hx.len, hx.chk);
27389diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27390--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27391+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27392@@ -95,7 +95,7 @@ struct su3000_state {
27393
27394 struct s6x0_state {
27395 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27396-};
27397+} __no_const;
27398
27399 /* debug */
27400 static int dvb_usb_dw2102_debug;
27401diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27402--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27403+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27404@@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27405 packet_size = 0x31;
27406 len_in = 1;
27407
27408+ pax_track_stack();
27409
27410 info("FRM Starting Firmware Download");
27411
27412@@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27413 int ret = 0, len_in;
27414 u8 data[512] = {0};
27415
27416+ pax_track_stack();
27417+
27418 data[0] = 0x0a;
27419 len_in = 1;
27420 info("FRM Firmware Cold Reset");
27421diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27422--- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27423+++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27424@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27425 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27426 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27427 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27428-};
27429+} __no_const;
27430
27431 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27432 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27433diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27434--- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27435+++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27436@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27437 int ret = -1;
27438 int sync;
27439
27440+ pax_track_stack();
27441+
27442 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27443
27444 fcp = 3000;
27445diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27446--- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27447+++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27448@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27449 u8 tudata[585];
27450 int i;
27451
27452+ pax_track_stack();
27453+
27454 dprintk("Firmware is %zd bytes\n",fw->size);
27455
27456 /* Get eprom data */
27457diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27458--- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27459+++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27460@@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27461 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27462 mutex_unlock(&dev->lock);
27463
27464- if (copy_to_user(data, readbuf, i))
27465+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27466 return -EFAULT;
27467 return i;
27468 }
27469diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27470--- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27471+++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27472@@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27473
27474 int rc_register_device(struct rc_dev *dev)
27475 {
27476- static atomic_t devno = ATOMIC_INIT(0);
27477+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
27478 struct rc_map *rc_map;
27479 const char *path;
27480 int rc;
27481@@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27482 if (dev->close)
27483 dev->input_dev->close = ir_close;
27484
27485- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27486+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27487 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27488 dev_set_drvdata(&dev->dev, dev);
27489 rc = device_add(&dev->dev);
27490diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27491--- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27492+++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27493@@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27494
27495 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27496
27497-static atomic_t cx18_instance = ATOMIC_INIT(0);
27498+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27499
27500 /* Parameter declarations */
27501 static int cardtype[CX18_MAX_CARDS];
27502@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27503 struct i2c_client c;
27504 u8 eedata[256];
27505
27506+ pax_track_stack();
27507+
27508 memset(&c, 0, sizeof(c));
27509 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27510 c.adapter = &cx->i2c_adap[0];
27511@@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27512 struct cx18 *cx;
27513
27514 /* FIXME - module parameter arrays constrain max instances */
27515- i = atomic_inc_return(&cx18_instance) - 1;
27516+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27517 if (i >= CX18_MAX_CARDS) {
27518 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27519 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27520diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27521--- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27522+++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27523@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27524 bool handle = false;
27525 struct ir_raw_event ir_core_event[64];
27526
27527+ pax_track_stack();
27528+
27529 do {
27530 num = 0;
27531 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27532diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27533--- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27534+++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27535@@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27536 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27537
27538 /* ivtv instance counter */
27539-static atomic_t ivtv_instance = ATOMIC_INIT(0);
27540+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27541
27542 /* Parameter declarations */
27543 static int cardtype[IVTV_MAX_CARDS];
27544diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27545--- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27546+++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27547@@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27548 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27549
27550 do_gettimeofday(&vb->ts);
27551- vb->field_count = atomic_add_return(2, &fh->field_count);
27552+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27553 if (csr & csr_error) {
27554 vb->state = VIDEOBUF_ERROR;
27555 if (!atomic_read(&fh->cam->in_reset)) {
27556diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27557--- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27558+++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27559@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27560 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27561 struct videobuf_queue vbq;
27562 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27563- atomic_t field_count; /* field counter for videobuf_buffer */
27564+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27565 /* accessing cam here doesn't need serialisation: it's constant */
27566 struct omap24xxcam_device *cam;
27567 };
27568diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27569--- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27570+++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27571@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27572 u8 *eeprom;
27573 struct tveeprom tvdata;
27574
27575+ pax_track_stack();
27576+
27577 memset(&tvdata,0,sizeof(tvdata));
27578
27579 eeprom = pvr2_eeprom_fetch(hdw);
27580diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27581--- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27582+++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27583@@ -196,7 +196,7 @@ struct pvr2_hdw {
27584
27585 /* I2C stuff */
27586 struct i2c_adapter i2c_adap;
27587- struct i2c_algorithm i2c_algo;
27588+ i2c_algorithm_no_const i2c_algo;
27589 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27590 int i2c_cx25840_hack_state;
27591 int i2c_linked;
27592diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27593--- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27594+++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27595@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27596 unsigned char localPAT[256];
27597 unsigned char localPMT[256];
27598
27599+ pax_track_stack();
27600+
27601 /* Set video format - must be done first as it resets other settings */
27602 set_reg8(client, 0x41, h->video_format);
27603
27604diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27605--- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27606+++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27607@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27608 u8 tmp[512];
27609 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27610
27611+ pax_track_stack();
27612+
27613 /* While any outstand message on the bus exists... */
27614 do {
27615
27616@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27617 u8 tmp[512];
27618 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27619
27620+ pax_track_stack();
27621+
27622 while (loop) {
27623
27624 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27625diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27626--- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27627+++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27628@@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27629
27630 /* Platform device functions */
27631
27632-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27633+static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27634 .vidioc_querycap = timblogiw_querycap,
27635 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27636 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27637@@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27638 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27639 };
27640
27641-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27642+static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27643 .owner = THIS_MODULE,
27644 .open = timblogiw_open,
27645 .release = timblogiw_close,
27646diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27647--- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27648+++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27649@@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27650 unsigned char rv, gv, bv;
27651 static unsigned char *Y, *U, *V;
27652
27653+ pax_track_stack();
27654+
27655 frame = usbvision->cur_frame;
27656 image_size = frame->frmwidth * frame->frmheight;
27657 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27658diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27659--- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27660+++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27661@@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27662 EXPORT_SYMBOL_GPL(v4l2_device_put);
27663
27664 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27665- atomic_t *instance)
27666+ atomic_unchecked_t *instance)
27667 {
27668- int num = atomic_inc_return(instance) - 1;
27669+ int num = atomic_inc_return_unchecked(instance) - 1;
27670 int len = strlen(basename);
27671
27672 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27673diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27674--- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27675+++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27676@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27677 {
27678 struct videobuf_queue q;
27679
27680+ pax_track_stack();
27681+
27682 /* Required to make generic handler to call __videobuf_alloc */
27683 q.int_ops = &sg_ops;
27684
27685diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27686--- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27687+++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27688@@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27689 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27690 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27691
27692+#ifdef CONFIG_GRKERNSEC_HIDESYM
27693+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27694+#else
27695 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27696 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27697+#endif
27698+
27699 /*
27700 * Rounding UP to nearest 4-kB boundary here...
27701 */
27702diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27703--- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27704+++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27705@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27706 return 0;
27707 }
27708
27709+static inline void
27710+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27711+{
27712+ if (phy_info->port_details) {
27713+ phy_info->port_details->rphy = rphy;
27714+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27715+ ioc->name, rphy));
27716+ }
27717+
27718+ if (rphy) {
27719+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27720+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27721+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27722+ ioc->name, rphy, rphy->dev.release));
27723+ }
27724+}
27725+
27726 /* no mutex */
27727 static void
27728 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27729@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27730 return NULL;
27731 }
27732
27733-static inline void
27734-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27735-{
27736- if (phy_info->port_details) {
27737- phy_info->port_details->rphy = rphy;
27738- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27739- ioc->name, rphy));
27740- }
27741-
27742- if (rphy) {
27743- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27744- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27745- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27746- ioc->name, rphy, rphy->dev.release));
27747- }
27748-}
27749-
27750 static inline struct sas_port *
27751 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27752 {
27753diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27754--- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27755+++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27756@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27757
27758 h = shost_priv(SChost);
27759
27760- if (h) {
27761- if (h->info_kbuf == NULL)
27762- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27763- return h->info_kbuf;
27764- h->info_kbuf[0] = '\0';
27765+ if (!h)
27766+ return NULL;
27767
27768- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27769- h->info_kbuf[size-1] = '\0';
27770- }
27771+ if (h->info_kbuf == NULL)
27772+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27773+ return h->info_kbuf;
27774+ h->info_kbuf[0] = '\0';
27775+
27776+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27777+ h->info_kbuf[size-1] = '\0';
27778
27779 return h->info_kbuf;
27780 }
27781diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27782--- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27783+++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27784@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27785 struct i2o_message *msg;
27786 unsigned int iop;
27787
27788+ pax_track_stack();
27789+
27790 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27791 return -EFAULT;
27792
27793diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27794--- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27795+++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27796@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27797 "Array Controller Device"
27798 };
27799
27800-static char *chtostr(u8 * chars, int n)
27801-{
27802- char tmp[256];
27803- tmp[0] = 0;
27804- return strncat(tmp, (char *)chars, n);
27805-}
27806-
27807 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27808 char *group)
27809 {
27810@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27811
27812 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27813 seq_printf(seq, "%-#8x", ddm_table.module_id);
27814- seq_printf(seq, "%-29s",
27815- chtostr(ddm_table.module_name_version, 28));
27816+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27817 seq_printf(seq, "%9d ", ddm_table.data_size);
27818 seq_printf(seq, "%8d", ddm_table.code_size);
27819
27820@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27821
27822 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27823 seq_printf(seq, "%-#8x", dst->module_id);
27824- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27825- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27826+ seq_printf(seq, "%-.28s", dst->module_name_version);
27827+ seq_printf(seq, "%-.8s", dst->date);
27828 seq_printf(seq, "%8d ", dst->module_size);
27829 seq_printf(seq, "%8d ", dst->mpb_size);
27830 seq_printf(seq, "0x%04x", dst->module_flags);
27831@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27832 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27833 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27834 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27835- seq_printf(seq, "Vendor info : %s\n",
27836- chtostr((u8 *) (work32 + 2), 16));
27837- seq_printf(seq, "Product info : %s\n",
27838- chtostr((u8 *) (work32 + 6), 16));
27839- seq_printf(seq, "Description : %s\n",
27840- chtostr((u8 *) (work32 + 10), 16));
27841- seq_printf(seq, "Product rev. : %s\n",
27842- chtostr((u8 *) (work32 + 14), 8));
27843+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27844+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27845+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27846+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27847
27848 seq_printf(seq, "Serial number : ");
27849 print_serial_number(seq, (u8 *) (work32 + 16),
27850@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27851 }
27852
27853 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27854- seq_printf(seq, "Module name : %s\n",
27855- chtostr(result.module_name, 24));
27856- seq_printf(seq, "Module revision : %s\n",
27857- chtostr(result.module_rev, 8));
27858+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
27859+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27860
27861 seq_printf(seq, "Serial number : ");
27862 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27863@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27864 return 0;
27865 }
27866
27867- seq_printf(seq, "Device name : %s\n",
27868- chtostr(result.device_name, 64));
27869- seq_printf(seq, "Service name : %s\n",
27870- chtostr(result.service_name, 64));
27871- seq_printf(seq, "Physical name : %s\n",
27872- chtostr(result.physical_location, 64));
27873- seq_printf(seq, "Instance number : %s\n",
27874- chtostr(result.instance_number, 4));
27875+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
27876+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
27877+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27878+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27879
27880 return 0;
27881 }
27882diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27883--- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27884+++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27885@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27886
27887 spin_lock_irqsave(&c->context_list_lock, flags);
27888
27889- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27890- atomic_inc(&c->context_list_counter);
27891+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27892+ atomic_inc_unchecked(&c->context_list_counter);
27893
27894- entry->context = atomic_read(&c->context_list_counter);
27895+ entry->context = atomic_read_unchecked(&c->context_list_counter);
27896
27897 list_add(&entry->list, &c->context_list);
27898
27899@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27900
27901 #if BITS_PER_LONG == 64
27902 spin_lock_init(&c->context_list_lock);
27903- atomic_set(&c->context_list_counter, 0);
27904+ atomic_set_unchecked(&c->context_list_counter, 0);
27905 INIT_LIST_HEAD(&c->context_list);
27906 #endif
27907
27908diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27909--- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27910+++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27911@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27912
27913 struct abx500_device_entry {
27914 struct list_head list;
27915- struct abx500_ops ops;
27916+ abx500_ops_no_const ops;
27917 struct device *dev;
27918 };
27919
27920diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27921--- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27922+++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27923@@ -13,6 +13,7 @@
27924
27925 #include <linux/kernel.h>
27926 #include <linux/module.h>
27927+#include <linux/slab.h>
27928 #include <linux/init.h>
27929 #include <linux/pci.h>
27930 #include <linux/interrupt.h>
27931diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27932--- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27933+++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27934@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27935 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27936 int ret;
27937
27938+ pax_track_stack();
27939+
27940 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27941 return -EINVAL;
27942
27943diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27944--- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27945+++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27946@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27947 * the lid is closed. This leads to interrupts as soon as a little move
27948 * is done.
27949 */
27950- atomic_inc(&lis3_dev.count);
27951+ atomic_inc_unchecked(&lis3_dev.count);
27952
27953 wake_up_interruptible(&lis3_dev.misc_wait);
27954 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27955@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27956 if (lis3_dev.pm_dev)
27957 pm_runtime_get_sync(lis3_dev.pm_dev);
27958
27959- atomic_set(&lis3_dev.count, 0);
27960+ atomic_set_unchecked(&lis3_dev.count, 0);
27961 return 0;
27962 }
27963
27964@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27965 add_wait_queue(&lis3_dev.misc_wait, &wait);
27966 while (true) {
27967 set_current_state(TASK_INTERRUPTIBLE);
27968- data = atomic_xchg(&lis3_dev.count, 0);
27969+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27970 if (data)
27971 break;
27972
27973@@ -583,7 +583,7 @@ out:
27974 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27975 {
27976 poll_wait(file, &lis3_dev.misc_wait, wait);
27977- if (atomic_read(&lis3_dev.count))
27978+ if (atomic_read_unchecked(&lis3_dev.count))
27979 return POLLIN | POLLRDNORM;
27980 return 0;
27981 }
27982diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
27983--- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
27984+++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
27985@@ -265,7 +265,7 @@ struct lis3lv02d {
27986 struct input_polled_dev *idev; /* input device */
27987 struct platform_device *pdev; /* platform device */
27988 struct regulator_bulk_data regulators[2];
27989- atomic_t count; /* interrupt count after last read */
27990+ atomic_unchecked_t count; /* interrupt count after last read */
27991 union axis_conversion ac; /* hw -> logical axis */
27992 int mapped_btns[3];
27993
27994diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
27995--- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
27996+++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
27997@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27998 unsigned long nsec;
27999
28000 nsec = CLKS2NSEC(clks);
28001- atomic_long_inc(&mcs_op_statistics[op].count);
28002- atomic_long_add(nsec, &mcs_op_statistics[op].total);
28003+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28004+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28005 if (mcs_op_statistics[op].max < nsec)
28006 mcs_op_statistics[op].max = nsec;
28007 }
28008diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28009--- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28010+++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28011@@ -32,9 +32,9 @@
28012
28013 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28014
28015-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28016+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28017 {
28018- unsigned long val = atomic_long_read(v);
28019+ unsigned long val = atomic_long_read_unchecked(v);
28020
28021 seq_printf(s, "%16lu %s\n", val, id);
28022 }
28023@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28024
28025 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28026 for (op = 0; op < mcsop_last; op++) {
28027- count = atomic_long_read(&mcs_op_statistics[op].count);
28028- total = atomic_long_read(&mcs_op_statistics[op].total);
28029+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28030+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28031 max = mcs_op_statistics[op].max;
28032 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28033 count ? total / count : 0, max);
28034diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28035--- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28036+++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28037@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28038 * GRU statistics.
28039 */
28040 struct gru_stats_s {
28041- atomic_long_t vdata_alloc;
28042- atomic_long_t vdata_free;
28043- atomic_long_t gts_alloc;
28044- atomic_long_t gts_free;
28045- atomic_long_t gms_alloc;
28046- atomic_long_t gms_free;
28047- atomic_long_t gts_double_allocate;
28048- atomic_long_t assign_context;
28049- atomic_long_t assign_context_failed;
28050- atomic_long_t free_context;
28051- atomic_long_t load_user_context;
28052- atomic_long_t load_kernel_context;
28053- atomic_long_t lock_kernel_context;
28054- atomic_long_t unlock_kernel_context;
28055- atomic_long_t steal_user_context;
28056- atomic_long_t steal_kernel_context;
28057- atomic_long_t steal_context_failed;
28058- atomic_long_t nopfn;
28059- atomic_long_t asid_new;
28060- atomic_long_t asid_next;
28061- atomic_long_t asid_wrap;
28062- atomic_long_t asid_reuse;
28063- atomic_long_t intr;
28064- atomic_long_t intr_cbr;
28065- atomic_long_t intr_tfh;
28066- atomic_long_t intr_spurious;
28067- atomic_long_t intr_mm_lock_failed;
28068- atomic_long_t call_os;
28069- atomic_long_t call_os_wait_queue;
28070- atomic_long_t user_flush_tlb;
28071- atomic_long_t user_unload_context;
28072- atomic_long_t user_exception;
28073- atomic_long_t set_context_option;
28074- atomic_long_t check_context_retarget_intr;
28075- atomic_long_t check_context_unload;
28076- atomic_long_t tlb_dropin;
28077- atomic_long_t tlb_preload_page;
28078- atomic_long_t tlb_dropin_fail_no_asid;
28079- atomic_long_t tlb_dropin_fail_upm;
28080- atomic_long_t tlb_dropin_fail_invalid;
28081- atomic_long_t tlb_dropin_fail_range_active;
28082- atomic_long_t tlb_dropin_fail_idle;
28083- atomic_long_t tlb_dropin_fail_fmm;
28084- atomic_long_t tlb_dropin_fail_no_exception;
28085- atomic_long_t tfh_stale_on_fault;
28086- atomic_long_t mmu_invalidate_range;
28087- atomic_long_t mmu_invalidate_page;
28088- atomic_long_t flush_tlb;
28089- atomic_long_t flush_tlb_gru;
28090- atomic_long_t flush_tlb_gru_tgh;
28091- atomic_long_t flush_tlb_gru_zero_asid;
28092-
28093- atomic_long_t copy_gpa;
28094- atomic_long_t read_gpa;
28095-
28096- atomic_long_t mesq_receive;
28097- atomic_long_t mesq_receive_none;
28098- atomic_long_t mesq_send;
28099- atomic_long_t mesq_send_failed;
28100- atomic_long_t mesq_noop;
28101- atomic_long_t mesq_send_unexpected_error;
28102- atomic_long_t mesq_send_lb_overflow;
28103- atomic_long_t mesq_send_qlimit_reached;
28104- atomic_long_t mesq_send_amo_nacked;
28105- atomic_long_t mesq_send_put_nacked;
28106- atomic_long_t mesq_page_overflow;
28107- atomic_long_t mesq_qf_locked;
28108- atomic_long_t mesq_qf_noop_not_full;
28109- atomic_long_t mesq_qf_switch_head_failed;
28110- atomic_long_t mesq_qf_unexpected_error;
28111- atomic_long_t mesq_noop_unexpected_error;
28112- atomic_long_t mesq_noop_lb_overflow;
28113- atomic_long_t mesq_noop_qlimit_reached;
28114- atomic_long_t mesq_noop_amo_nacked;
28115- atomic_long_t mesq_noop_put_nacked;
28116- atomic_long_t mesq_noop_page_overflow;
28117+ atomic_long_unchecked_t vdata_alloc;
28118+ atomic_long_unchecked_t vdata_free;
28119+ atomic_long_unchecked_t gts_alloc;
28120+ atomic_long_unchecked_t gts_free;
28121+ atomic_long_unchecked_t gms_alloc;
28122+ atomic_long_unchecked_t gms_free;
28123+ atomic_long_unchecked_t gts_double_allocate;
28124+ atomic_long_unchecked_t assign_context;
28125+ atomic_long_unchecked_t assign_context_failed;
28126+ atomic_long_unchecked_t free_context;
28127+ atomic_long_unchecked_t load_user_context;
28128+ atomic_long_unchecked_t load_kernel_context;
28129+ atomic_long_unchecked_t lock_kernel_context;
28130+ atomic_long_unchecked_t unlock_kernel_context;
28131+ atomic_long_unchecked_t steal_user_context;
28132+ atomic_long_unchecked_t steal_kernel_context;
28133+ atomic_long_unchecked_t steal_context_failed;
28134+ atomic_long_unchecked_t nopfn;
28135+ atomic_long_unchecked_t asid_new;
28136+ atomic_long_unchecked_t asid_next;
28137+ atomic_long_unchecked_t asid_wrap;
28138+ atomic_long_unchecked_t asid_reuse;
28139+ atomic_long_unchecked_t intr;
28140+ atomic_long_unchecked_t intr_cbr;
28141+ atomic_long_unchecked_t intr_tfh;
28142+ atomic_long_unchecked_t intr_spurious;
28143+ atomic_long_unchecked_t intr_mm_lock_failed;
28144+ atomic_long_unchecked_t call_os;
28145+ atomic_long_unchecked_t call_os_wait_queue;
28146+ atomic_long_unchecked_t user_flush_tlb;
28147+ atomic_long_unchecked_t user_unload_context;
28148+ atomic_long_unchecked_t user_exception;
28149+ atomic_long_unchecked_t set_context_option;
28150+ atomic_long_unchecked_t check_context_retarget_intr;
28151+ atomic_long_unchecked_t check_context_unload;
28152+ atomic_long_unchecked_t tlb_dropin;
28153+ atomic_long_unchecked_t tlb_preload_page;
28154+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28155+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28156+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28157+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28158+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28159+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28160+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28161+ atomic_long_unchecked_t tfh_stale_on_fault;
28162+ atomic_long_unchecked_t mmu_invalidate_range;
28163+ atomic_long_unchecked_t mmu_invalidate_page;
28164+ atomic_long_unchecked_t flush_tlb;
28165+ atomic_long_unchecked_t flush_tlb_gru;
28166+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28167+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28168+
28169+ atomic_long_unchecked_t copy_gpa;
28170+ atomic_long_unchecked_t read_gpa;
28171+
28172+ atomic_long_unchecked_t mesq_receive;
28173+ atomic_long_unchecked_t mesq_receive_none;
28174+ atomic_long_unchecked_t mesq_send;
28175+ atomic_long_unchecked_t mesq_send_failed;
28176+ atomic_long_unchecked_t mesq_noop;
28177+ atomic_long_unchecked_t mesq_send_unexpected_error;
28178+ atomic_long_unchecked_t mesq_send_lb_overflow;
28179+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28180+ atomic_long_unchecked_t mesq_send_amo_nacked;
28181+ atomic_long_unchecked_t mesq_send_put_nacked;
28182+ atomic_long_unchecked_t mesq_page_overflow;
28183+ atomic_long_unchecked_t mesq_qf_locked;
28184+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28185+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28186+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28187+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28188+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28189+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28190+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28191+ atomic_long_unchecked_t mesq_noop_put_nacked;
28192+ atomic_long_unchecked_t mesq_noop_page_overflow;
28193
28194 };
28195
28196@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28197 tghop_invalidate, mcsop_last};
28198
28199 struct mcs_op_statistic {
28200- atomic_long_t count;
28201- atomic_long_t total;
28202+ atomic_long_unchecked_t count;
28203+ atomic_long_unchecked_t total;
28204 unsigned long max;
28205 };
28206
28207@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28208
28209 #define STAT(id) do { \
28210 if (gru_options & OPT_STATS) \
28211- atomic_long_inc(&gru_stats.id); \
28212+ atomic_long_inc_unchecked(&gru_stats.id); \
28213 } while (0)
28214
28215 #ifdef CONFIG_SGI_GRU_DEBUG
28216diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28217--- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28218+++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28219@@ -289,7 +289,7 @@ struct xpc_interface {
28220 xpc_notify_func, void *);
28221 void (*received) (short, int, void *);
28222 enum xp_retval (*partid_to_nasids) (short, void *);
28223-};
28224+} __no_const;
28225
28226 extern struct xpc_interface xpc_interface;
28227
28228diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28229--- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28230+++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28231@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28232 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28233 unsigned long timeo = jiffies + HZ;
28234
28235+ pax_track_stack();
28236+
28237 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28238 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28239 goto sleep;
28240@@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28241 unsigned long initial_adr;
28242 int initial_len = len;
28243
28244+ pax_track_stack();
28245+
28246 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28247 adr += chip->start;
28248 initial_adr = adr;
28249@@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28250 int retries = 3;
28251 int ret;
28252
28253+ pax_track_stack();
28254+
28255 adr += chip->start;
28256
28257 retry:
28258diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28259--- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28260+++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28261@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28262 unsigned long cmd_addr;
28263 struct cfi_private *cfi = map->fldrv_priv;
28264
28265+ pax_track_stack();
28266+
28267 adr += chip->start;
28268
28269 /* Ensure cmd read/writes are aligned. */
28270@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28271 DECLARE_WAITQUEUE(wait, current);
28272 int wbufsize, z;
28273
28274+ pax_track_stack();
28275+
28276 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28277 if (adr & (map_bankwidth(map)-1))
28278 return -EINVAL;
28279@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28280 DECLARE_WAITQUEUE(wait, current);
28281 int ret = 0;
28282
28283+ pax_track_stack();
28284+
28285 adr += chip->start;
28286
28287 /* Let's determine this according to the interleave only once */
28288@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28289 unsigned long timeo = jiffies + HZ;
28290 DECLARE_WAITQUEUE(wait, current);
28291
28292+ pax_track_stack();
28293+
28294 adr += chip->start;
28295
28296 /* Let's determine this according to the interleave only once */
28297@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28298 unsigned long timeo = jiffies + HZ;
28299 DECLARE_WAITQUEUE(wait, current);
28300
28301+ pax_track_stack();
28302+
28303 adr += chip->start;
28304
28305 /* Let's determine this according to the interleave only once */
28306diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28307--- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28308+++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28309@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28310
28311 /* The ECC will not be calculated correctly if less than 512 is written */
28312 /* DBB-
28313- if (len != 0x200 && eccbuf)
28314+ if (len != 0x200)
28315 printk(KERN_WARNING
28316 "ECC needs a full sector write (adr: %lx size %lx)\n",
28317 (long) to, (long) len);
28318diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28319--- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28320+++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28321@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28322 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28323
28324 /* Don't allow read past end of device */
28325- if (from >= this->totlen)
28326+ if (from >= this->totlen || !len)
28327 return -EINVAL;
28328
28329 /* Don't allow a single read to cross a 512-byte block boundary */
28330diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28331--- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28332+++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28333@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28334 loff_t offset;
28335 uint16_t srcunitswap = cpu_to_le16(srcunit);
28336
28337+ pax_track_stack();
28338+
28339 eun = &part->EUNInfo[srcunit];
28340 xfer = &part->XferInfo[xferunit];
28341 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28342diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28343--- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28344+++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28345@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28346 struct inftl_oob oob;
28347 size_t retlen;
28348
28349+ pax_track_stack();
28350+
28351 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28352 "pending=%d)\n", inftl, thisVUC, pendingblock);
28353
28354diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28355--- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28356+++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28357@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28358 struct INFTLPartition *ip;
28359 size_t retlen;
28360
28361+ pax_track_stack();
28362+
28363 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28364
28365 /*
28366diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28367--- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28368+++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28369@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28370 {
28371 map_word pfow_val[4];
28372
28373+ pax_track_stack();
28374+
28375 /* Check identification string */
28376 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28377 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28378diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28379--- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28380+++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28381@@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28382 u_long size;
28383 struct mtd_info_user info;
28384
28385+ pax_track_stack();
28386+
28387 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28388
28389 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28390diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28391--- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28392+++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28393@@ -25,6 +25,7 @@
28394 #include <linux/pci.h>
28395 #include <linux/mtd/mtd.h>
28396 #include <linux/module.h>
28397+#include <linux/slab.h>
28398
28399 #include "denali.h"
28400
28401diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28402--- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28403+++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28404@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28405 int inplace = 1;
28406 size_t retlen;
28407
28408+ pax_track_stack();
28409+
28410 memset(BlockMap, 0xff, sizeof(BlockMap));
28411 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28412
28413diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28414--- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28415+++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28416@@ -24,6 +24,7 @@
28417 #include <asm/errno.h>
28418 #include <linux/delay.h>
28419 #include <linux/slab.h>
28420+#include <linux/sched.h>
28421 #include <linux/mtd/mtd.h>
28422 #include <linux/mtd/nand.h>
28423 #include <linux/mtd/nftl.h>
28424@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28425 struct mtd_info *mtd = nftl->mbd.mtd;
28426 unsigned int i;
28427
28428+ pax_track_stack();
28429+
28430 /* Assume logical EraseSize == physical erasesize for starting the scan.
28431 We'll sort it out later if we find a MediaHeader which says otherwise */
28432 /* Actually, we won't. The new DiskOnChip driver has already scanned
28433diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28434--- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28435+++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28436@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28437 static int __init bytes_str_to_int(const char *str)
28438 {
28439 char *endp;
28440- unsigned long result;
28441+ unsigned long result, scale = 1;
28442
28443 result = simple_strtoul(str, &endp, 0);
28444 if (str == endp || result >= INT_MAX) {
28445@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28446
28447 switch (*endp) {
28448 case 'G':
28449- result *= 1024;
28450+ scale *= 1024;
28451 case 'M':
28452- result *= 1024;
28453+ scale *= 1024;
28454 case 'K':
28455- result *= 1024;
28456+ scale *= 1024;
28457 if (endp[1] == 'i' && endp[2] == 'B')
28458 endp += 2;
28459 case '\0':
28460@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28461 return -EINVAL;
28462 }
28463
28464- return result;
28465+ if ((intoverflow_t)result*scale >= INT_MAX) {
28466+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28467+ str);
28468+ return -EINVAL;
28469+ }
28470+
28471+ return result*scale;
28472 }
28473
28474 /**
28475diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28476--- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28477+++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28478@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28479 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28480 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28481
28482-static struct bfa_ioc_hwif nw_hwif_ct;
28483+static struct bfa_ioc_hwif nw_hwif_ct = {
28484+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28485+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28486+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28487+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28488+ .ioc_map_port = bfa_ioc_ct_map_port,
28489+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28490+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28491+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28492+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28493+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28494+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28495+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28496+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28497+};
28498
28499 /**
28500 * Called from bfa_ioc_attach() to map asic specific calls.
28501@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28502 void
28503 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28504 {
28505- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28506- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28507- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28508- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28509- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28510- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28511- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28512- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28513- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28514- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28515- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28516- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28517- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28518-
28519 ioc->ioc_hwif = &nw_hwif_ct;
28520 }
28521
28522diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28523--- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28524+++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28525@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28526 struct bna_intr_info *intr_info =
28527 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28528 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28529- struct bna_tx_event_cbfn tx_cbfn;
28530+ static struct bna_tx_event_cbfn tx_cbfn = {
28531+ /* Initialize the tx event handlers */
28532+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28533+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28534+ .tx_stall_cbfn = bnad_cb_tx_stall,
28535+ .tx_resume_cbfn = bnad_cb_tx_resume,
28536+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28537+ };
28538 struct bna_tx *tx;
28539 unsigned long flags;
28540
28541@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28542 tx_config->txq_depth = bnad->txq_depth;
28543 tx_config->tx_type = BNA_TX_T_REGULAR;
28544
28545- /* Initialize the tx event handlers */
28546- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28547- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28548- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28549- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28550- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28551-
28552 /* Get BNA's resource requirement for one tx object */
28553 spin_lock_irqsave(&bnad->bna_lock, flags);
28554 bna_tx_res_req(bnad->num_txq_per_tx,
28555@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28556 struct bna_intr_info *intr_info =
28557 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28558 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28559- struct bna_rx_event_cbfn rx_cbfn;
28560+ static struct bna_rx_event_cbfn rx_cbfn = {
28561+ /* Initialize the Rx event handlers */
28562+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28563+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28564+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28565+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28566+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28567+ .rx_post_cbfn = bnad_cb_rx_post
28568+ };
28569 struct bna_rx *rx;
28570 unsigned long flags;
28571
28572 /* Initialize the Rx object configuration */
28573 bnad_init_rx_config(bnad, rx_config);
28574
28575- /* Initialize the Rx event handlers */
28576- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28577- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28578- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28579- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28580- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28581- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28582-
28583 /* Get BNA's resource requirement for one Rx object */
28584 spin_lock_irqsave(&bnad->bna_lock, flags);
28585 bna_rx_res_req(rx_config, res_info);
28586diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28587--- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28588+++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28589@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28590 int rc = 0;
28591 u32 magic, csum;
28592
28593+ pax_track_stack();
28594+
28595 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28596 goto test_nvram_done;
28597
28598diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28599--- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28600+++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28601@@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28602 int i, rc;
28603 u32 magic, crc;
28604
28605+ pax_track_stack();
28606+
28607 if (BP_NOMCP(bp))
28608 return 0;
28609
28610diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28611--- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28612+++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28613@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28614 */
28615 struct l2t_skb_cb {
28616 arp_failure_handler_func arp_failure_handler;
28617-};
28618+} __no_const;
28619
28620 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28621
28622diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28623--- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28624+++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28625@@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28626 unsigned int nchan = adap->params.nports;
28627 struct msix_entry entries[MAX_INGQ + 1];
28628
28629+ pax_track_stack();
28630+
28631 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28632 entries[i].entry = i;
28633
28634diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28635--- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28636+++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28637@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28638 u8 vpd[VPD_LEN], csum;
28639 unsigned int vpdr_len, kw_offset, id_len;
28640
28641+ pax_track_stack();
28642+
28643 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28644 if (ret < 0)
28645 return ret;
28646diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28647--- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28648+++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28649@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28650 {
28651 struct e1000_hw *hw = &adapter->hw;
28652 struct e1000_mac_info *mac = &hw->mac;
28653- struct e1000_mac_operations *func = &mac->ops;
28654+ e1000_mac_operations_no_const *func = &mac->ops;
28655 u32 swsm = 0;
28656 u32 swsm2 = 0;
28657 bool force_clear_smbi = false;
28658diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28659--- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28660+++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28661@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28662 {
28663 struct e1000_hw *hw = &adapter->hw;
28664 struct e1000_mac_info *mac = &hw->mac;
28665- struct e1000_mac_operations *func = &mac->ops;
28666+ e1000_mac_operations_no_const *func = &mac->ops;
28667
28668 /* Set media type */
28669 switch (adapter->pdev->device) {
28670diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28671--- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28672+++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28673@@ -775,6 +775,7 @@ struct e1000_mac_operations {
28674 void (*write_vfta)(struct e1000_hw *, u32, u32);
28675 s32 (*read_mac_addr)(struct e1000_hw *);
28676 };
28677+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28678
28679 /* Function pointers for the PHY. */
28680 struct e1000_phy_operations {
28681@@ -798,6 +799,7 @@ struct e1000_phy_operations {
28682 void (*power_up)(struct e1000_hw *);
28683 void (*power_down)(struct e1000_hw *);
28684 };
28685+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28686
28687 /* Function pointers for the NVM. */
28688 struct e1000_nvm_operations {
28689@@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28690 s32 (*validate)(struct e1000_hw *);
28691 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28692 };
28693+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28694
28695 struct e1000_mac_info {
28696- struct e1000_mac_operations ops;
28697+ e1000_mac_operations_no_const ops;
28698 u8 addr[ETH_ALEN];
28699 u8 perm_addr[ETH_ALEN];
28700
28701@@ -852,7 +855,7 @@ struct e1000_mac_info {
28702 };
28703
28704 struct e1000_phy_info {
28705- struct e1000_phy_operations ops;
28706+ e1000_phy_operations_no_const ops;
28707
28708 enum e1000_phy_type type;
28709
28710@@ -886,7 +889,7 @@ struct e1000_phy_info {
28711 };
28712
28713 struct e1000_nvm_info {
28714- struct e1000_nvm_operations ops;
28715+ e1000_nvm_operations_no_const ops;
28716
28717 enum e1000_nvm_type type;
28718 enum e1000_nvm_override override;
28719diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28720--- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28721+++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28722@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28723 unsigned char buf[512];
28724 int count1;
28725
28726+ pax_track_stack();
28727+
28728 if (!count)
28729 return;
28730
28731diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28732--- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28733+++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28734@@ -314,6 +314,7 @@ struct e1000_mac_operations {
28735 s32 (*read_mac_addr)(struct e1000_hw *);
28736 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28737 };
28738+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28739
28740 struct e1000_phy_operations {
28741 s32 (*acquire)(struct e1000_hw *);
28742@@ -330,6 +331,7 @@ struct e1000_phy_operations {
28743 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28744 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28745 };
28746+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28747
28748 struct e1000_nvm_operations {
28749 s32 (*acquire)(struct e1000_hw *);
28750@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28751 s32 (*update)(struct e1000_hw *);
28752 s32 (*validate)(struct e1000_hw *);
28753 };
28754+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28755
28756 struct e1000_info {
28757 s32 (*get_invariants)(struct e1000_hw *);
28758@@ -350,7 +353,7 @@ struct e1000_info {
28759 extern const struct e1000_info e1000_82575_info;
28760
28761 struct e1000_mac_info {
28762- struct e1000_mac_operations ops;
28763+ e1000_mac_operations_no_const ops;
28764
28765 u8 addr[6];
28766 u8 perm_addr[6];
28767@@ -388,7 +391,7 @@ struct e1000_mac_info {
28768 };
28769
28770 struct e1000_phy_info {
28771- struct e1000_phy_operations ops;
28772+ e1000_phy_operations_no_const ops;
28773
28774 enum e1000_phy_type type;
28775
28776@@ -423,7 +426,7 @@ struct e1000_phy_info {
28777 };
28778
28779 struct e1000_nvm_info {
28780- struct e1000_nvm_operations ops;
28781+ e1000_nvm_operations_no_const ops;
28782 enum e1000_nvm_type type;
28783 enum e1000_nvm_override override;
28784
28785@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28786 s32 (*check_for_ack)(struct e1000_hw *, u16);
28787 s32 (*check_for_rst)(struct e1000_hw *, u16);
28788 };
28789+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28790
28791 struct e1000_mbx_stats {
28792 u32 msgs_tx;
28793@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28794 };
28795
28796 struct e1000_mbx_info {
28797- struct e1000_mbx_operations ops;
28798+ e1000_mbx_operations_no_const ops;
28799 struct e1000_mbx_stats stats;
28800 u32 timeout;
28801 u32 usec_delay;
28802diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28803--- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28804+++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28805@@ -189,9 +189,10 @@ struct e1000_mac_operations {
28806 s32 (*read_mac_addr)(struct e1000_hw *);
28807 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28808 };
28809+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28810
28811 struct e1000_mac_info {
28812- struct e1000_mac_operations ops;
28813+ e1000_mac_operations_no_const ops;
28814 u8 addr[6];
28815 u8 perm_addr[6];
28816
28817@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28818 s32 (*check_for_ack)(struct e1000_hw *);
28819 s32 (*check_for_rst)(struct e1000_hw *);
28820 };
28821+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28822
28823 struct e1000_mbx_stats {
28824 u32 msgs_tx;
28825@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28826 };
28827
28828 struct e1000_mbx_info {
28829- struct e1000_mbx_operations ops;
28830+ e1000_mbx_operations_no_const ops;
28831 struct e1000_mbx_stats stats;
28832 u32 timeout;
28833 u32 usec_delay;
28834diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28835--- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28836+++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28837@@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28838 u32 rctl;
28839 int i;
28840
28841+ pax_track_stack();
28842+
28843 /* Check for Promiscuous and All Multicast modes */
28844
28845 rctl = IXGB_READ_REG(hw, RCTL);
28846diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28847--- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28848+++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28849@@ -261,6 +261,9 @@ void __devinit
28850 ixgb_check_options(struct ixgb_adapter *adapter)
28851 {
28852 int bd = adapter->bd_number;
28853+
28854+ pax_track_stack();
28855+
28856 if (bd >= IXGB_MAX_NIC) {
28857 pr_notice("Warning: no configuration for board #%i\n", bd);
28858 pr_notice("Using defaults for all values\n");
28859diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28860--- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28861+++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28862@@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28863 s32 (*update_checksum)(struct ixgbe_hw *);
28864 u16 (*calc_checksum)(struct ixgbe_hw *);
28865 };
28866+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28867
28868 struct ixgbe_mac_operations {
28869 s32 (*init_hw)(struct ixgbe_hw *);
28870@@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28871 /* Flow Control */
28872 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28873 };
28874+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28875
28876 struct ixgbe_phy_operations {
28877 s32 (*identify)(struct ixgbe_hw *);
28878@@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28879 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28880 s32 (*check_overtemp)(struct ixgbe_hw *);
28881 };
28882+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28883
28884 struct ixgbe_eeprom_info {
28885- struct ixgbe_eeprom_operations ops;
28886+ ixgbe_eeprom_operations_no_const ops;
28887 enum ixgbe_eeprom_type type;
28888 u32 semaphore_delay;
28889 u16 word_size;
28890@@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28891
28892 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28893 struct ixgbe_mac_info {
28894- struct ixgbe_mac_operations ops;
28895+ ixgbe_mac_operations_no_const ops;
28896 enum ixgbe_mac_type type;
28897 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28898 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28899@@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28900 };
28901
28902 struct ixgbe_phy_info {
28903- struct ixgbe_phy_operations ops;
28904+ ixgbe_phy_operations_no_const ops;
28905 struct mdio_if_info mdio;
28906 enum ixgbe_phy_type type;
28907 u32 id;
28908@@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28909 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28910 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28911 };
28912+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28913
28914 struct ixgbe_mbx_stats {
28915 u32 msgs_tx;
28916@@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28917 };
28918
28919 struct ixgbe_mbx_info {
28920- struct ixgbe_mbx_operations ops;
28921+ ixgbe_mbx_operations_no_const ops;
28922 struct ixgbe_mbx_stats stats;
28923 u32 timeout;
28924 u32 usec_delay;
28925diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28926--- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28927+++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28928@@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28929 s32 (*clear_vfta)(struct ixgbe_hw *);
28930 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28931 };
28932+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28933
28934 enum ixgbe_mac_type {
28935 ixgbe_mac_unknown = 0,
28936@@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28937 };
28938
28939 struct ixgbe_mac_info {
28940- struct ixgbe_mac_operations ops;
28941+ ixgbe_mac_operations_no_const ops;
28942 u8 addr[6];
28943 u8 perm_addr[6];
28944
28945@@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28946 s32 (*check_for_ack)(struct ixgbe_hw *);
28947 s32 (*check_for_rst)(struct ixgbe_hw *);
28948 };
28949+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28950
28951 struct ixgbe_mbx_stats {
28952 u32 msgs_tx;
28953@@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28954 };
28955
28956 struct ixgbe_mbx_info {
28957- struct ixgbe_mbx_operations ops;
28958+ ixgbe_mbx_operations_no_const ops;
28959 struct ixgbe_mbx_stats stats;
28960 u32 timeout;
28961 u32 udelay;
28962diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
28963--- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
28964+++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
28965@@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
28966 int rc;
28967 u64 counter[TOTAL_PORT_COUNTER_NUM];
28968
28969+ pax_track_stack();
28970+
28971 mutex_lock(&hw_priv->lock);
28972 n = SWITCH_PORT_NUM;
28973 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28974diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
28975--- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
28976+++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
28977@@ -40,6 +40,7 @@
28978 #include <linux/dma-mapping.h>
28979 #include <linux/slab.h>
28980 #include <linux/io-mapping.h>
28981+#include <linux/sched.h>
28982
28983 #include <linux/mlx4/device.h>
28984 #include <linux/mlx4/doorbell.h>
28985@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28986 u64 icm_size;
28987 int err;
28988
28989+ pax_track_stack();
28990+
28991 err = mlx4_QUERY_FW(dev);
28992 if (err) {
28993 if (err == -EACCES)
28994diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
28995--- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
28996+++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
28997@@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
28998 int i, num_irqs, err;
28999 u8 first_ldg;
29000
29001+ pax_track_stack();
29002+
29003 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29004 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29005 ldg_num_map[i] = first_ldg + i;
29006diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29007--- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29008+++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29009@@ -82,7 +82,7 @@ static int cards_found;
29010 /*
29011 * VLB I/O addresses
29012 */
29013-static unsigned int pcnet32_portlist[] __initdata =
29014+static unsigned int pcnet32_portlist[] __devinitdata =
29015 { 0x300, 0x320, 0x340, 0x360, 0 };
29016
29017 static int pcnet32_debug;
29018@@ -270,7 +270,7 @@ struct pcnet32_private {
29019 struct sk_buff **rx_skbuff;
29020 dma_addr_t *tx_dma_addr;
29021 dma_addr_t *rx_dma_addr;
29022- struct pcnet32_access a;
29023+ struct pcnet32_access *a;
29024 spinlock_t lock; /* Guard lock */
29025 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29026 unsigned int rx_ring_size; /* current rx ring size */
29027@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29028 u16 val;
29029
29030 netif_wake_queue(dev);
29031- val = lp->a.read_csr(ioaddr, CSR3);
29032+ val = lp->a->read_csr(ioaddr, CSR3);
29033 val &= 0x00ff;
29034- lp->a.write_csr(ioaddr, CSR3, val);
29035+ lp->a->write_csr(ioaddr, CSR3, val);
29036 napi_enable(&lp->napi);
29037 }
29038
29039@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29040 r = mii_link_ok(&lp->mii_if);
29041 } else if (lp->chip_version >= PCNET32_79C970A) {
29042 ulong ioaddr = dev->base_addr; /* card base I/O address */
29043- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29044+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29045 } else { /* can not detect link on really old chips */
29046 r = 1;
29047 }
29048@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29049 pcnet32_netif_stop(dev);
29050
29051 spin_lock_irqsave(&lp->lock, flags);
29052- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29053+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29054
29055 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29056
29057@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29058 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29059 {
29060 struct pcnet32_private *lp = netdev_priv(dev);
29061- struct pcnet32_access *a = &lp->a; /* access to registers */
29062+ struct pcnet32_access *a = lp->a; /* access to registers */
29063 ulong ioaddr = dev->base_addr; /* card base I/O address */
29064 struct sk_buff *skb; /* sk buff */
29065 int x, i; /* counters */
29066@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29067 pcnet32_netif_stop(dev);
29068
29069 spin_lock_irqsave(&lp->lock, flags);
29070- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29071+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29072
29073 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29074
29075 /* Reset the PCNET32 */
29076- lp->a.reset(ioaddr);
29077- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29078+ lp->a->reset(ioaddr);
29079+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29080
29081 /* switch pcnet32 to 32bit mode */
29082- lp->a.write_bcr(ioaddr, 20, 2);
29083+ lp->a->write_bcr(ioaddr, 20, 2);
29084
29085 /* purge & init rings but don't actually restart */
29086 pcnet32_restart(dev, 0x0000);
29087
29088- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29089+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29090
29091 /* Initialize Transmit buffers. */
29092 size = data_len + 15;
29093@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29094
29095 /* set int loopback in CSR15 */
29096 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29097- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29098+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29099
29100 teststatus = cpu_to_le16(0x8000);
29101- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29102+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29103
29104 /* Check status of descriptors */
29105 for (x = 0; x < numbuffs; x++) {
29106@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29107 }
29108 }
29109
29110- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29111+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29112 wmb();
29113 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29114 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29115@@ -1015,7 +1015,7 @@ clean_up:
29116 pcnet32_restart(dev, CSR0_NORMAL);
29117 } else {
29118 pcnet32_purge_rx_ring(dev);
29119- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29120+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29121 }
29122 spin_unlock_irqrestore(&lp->lock, flags);
29123
29124@@ -1025,7 +1025,7 @@ clean_up:
29125 static void pcnet32_led_blink_callback(struct net_device *dev)
29126 {
29127 struct pcnet32_private *lp = netdev_priv(dev);
29128- struct pcnet32_access *a = &lp->a;
29129+ struct pcnet32_access *a = lp->a;
29130 ulong ioaddr = dev->base_addr;
29131 unsigned long flags;
29132 int i;
29133@@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29134 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29135 {
29136 struct pcnet32_private *lp = netdev_priv(dev);
29137- struct pcnet32_access *a = &lp->a;
29138+ struct pcnet32_access *a = lp->a;
29139 ulong ioaddr = dev->base_addr;
29140 unsigned long flags;
29141 int i, regs[4];
29142@@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29143 {
29144 int csr5;
29145 struct pcnet32_private *lp = netdev_priv(dev);
29146- struct pcnet32_access *a = &lp->a;
29147+ struct pcnet32_access *a = lp->a;
29148 ulong ioaddr = dev->base_addr;
29149 int ticks;
29150
29151@@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29152 spin_lock_irqsave(&lp->lock, flags);
29153 if (pcnet32_tx(dev)) {
29154 /* reset the chip to clear the error condition, then restart */
29155- lp->a.reset(ioaddr);
29156- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29157+ lp->a->reset(ioaddr);
29158+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29159 pcnet32_restart(dev, CSR0_START);
29160 netif_wake_queue(dev);
29161 }
29162@@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29163 __napi_complete(napi);
29164
29165 /* clear interrupt masks */
29166- val = lp->a.read_csr(ioaddr, CSR3);
29167+ val = lp->a->read_csr(ioaddr, CSR3);
29168 val &= 0x00ff;
29169- lp->a.write_csr(ioaddr, CSR3, val);
29170+ lp->a->write_csr(ioaddr, CSR3, val);
29171
29172 /* Set interrupt enable. */
29173- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29174+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29175
29176 spin_unlock_irqrestore(&lp->lock, flags);
29177 }
29178@@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29179 int i, csr0;
29180 u16 *buff = ptr;
29181 struct pcnet32_private *lp = netdev_priv(dev);
29182- struct pcnet32_access *a = &lp->a;
29183+ struct pcnet32_access *a = lp->a;
29184 ulong ioaddr = dev->base_addr;
29185 unsigned long flags;
29186
29187@@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29188 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29189 if (lp->phymask & (1 << j)) {
29190 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29191- lp->a.write_bcr(ioaddr, 33,
29192+ lp->a->write_bcr(ioaddr, 33,
29193 (j << 5) | i);
29194- *buff++ = lp->a.read_bcr(ioaddr, 34);
29195+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29196 }
29197 }
29198 }
29199@@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29200 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29201 lp->options |= PCNET32_PORT_FD;
29202
29203- lp->a = *a;
29204+ lp->a = a;
29205
29206 /* prior to register_netdev, dev->name is not yet correct */
29207 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29208@@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29209 if (lp->mii) {
29210 /* lp->phycount and lp->phymask are set to 0 by memset above */
29211
29212- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29213+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29214 /* scan for PHYs */
29215 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29216 unsigned short id1, id2;
29217@@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29218 pr_info("Found PHY %04x:%04x at address %d\n",
29219 id1, id2, i);
29220 }
29221- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29222+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29223 if (lp->phycount > 1)
29224 lp->options |= PCNET32_PORT_MII;
29225 }
29226@@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29227 }
29228
29229 /* Reset the PCNET32 */
29230- lp->a.reset(ioaddr);
29231+ lp->a->reset(ioaddr);
29232
29233 /* switch pcnet32 to 32bit mode */
29234- lp->a.write_bcr(ioaddr, 20, 2);
29235+ lp->a->write_bcr(ioaddr, 20, 2);
29236
29237 netif_printk(lp, ifup, KERN_DEBUG, dev,
29238 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29239@@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29240 (u32) (lp->init_dma_addr));
29241
29242 /* set/reset autoselect bit */
29243- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29244+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29245 if (lp->options & PCNET32_PORT_ASEL)
29246 val |= 2;
29247- lp->a.write_bcr(ioaddr, 2, val);
29248+ lp->a->write_bcr(ioaddr, 2, val);
29249
29250 /* handle full duplex setting */
29251 if (lp->mii_if.full_duplex) {
29252- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29253+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29254 if (lp->options & PCNET32_PORT_FD) {
29255 val |= 1;
29256 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29257@@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29258 if (lp->chip_version == 0x2627)
29259 val |= 3;
29260 }
29261- lp->a.write_bcr(ioaddr, 9, val);
29262+ lp->a->write_bcr(ioaddr, 9, val);
29263 }
29264
29265 /* set/reset GPSI bit in test register */
29266- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29267+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29268 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29269 val |= 0x10;
29270- lp->a.write_csr(ioaddr, 124, val);
29271+ lp->a->write_csr(ioaddr, 124, val);
29272
29273 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29274 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29275@@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29276 * duplex, and/or enable auto negotiation, and clear DANAS
29277 */
29278 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29279- lp->a.write_bcr(ioaddr, 32,
29280- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29281+ lp->a->write_bcr(ioaddr, 32,
29282+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29283 /* disable Auto Negotiation, set 10Mpbs, HD */
29284- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29285+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29286 if (lp->options & PCNET32_PORT_FD)
29287 val |= 0x10;
29288 if (lp->options & PCNET32_PORT_100)
29289 val |= 0x08;
29290- lp->a.write_bcr(ioaddr, 32, val);
29291+ lp->a->write_bcr(ioaddr, 32, val);
29292 } else {
29293 if (lp->options & PCNET32_PORT_ASEL) {
29294- lp->a.write_bcr(ioaddr, 32,
29295- lp->a.read_bcr(ioaddr,
29296+ lp->a->write_bcr(ioaddr, 32,
29297+ lp->a->read_bcr(ioaddr,
29298 32) | 0x0080);
29299 /* enable auto negotiate, setup, disable fd */
29300- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29301+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29302 val |= 0x20;
29303- lp->a.write_bcr(ioaddr, 32, val);
29304+ lp->a->write_bcr(ioaddr, 32, val);
29305 }
29306 }
29307 } else {
29308@@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29309 * There is really no good other way to handle multiple PHYs
29310 * other than turning off all automatics
29311 */
29312- val = lp->a.read_bcr(ioaddr, 2);
29313- lp->a.write_bcr(ioaddr, 2, val & ~2);
29314- val = lp->a.read_bcr(ioaddr, 32);
29315- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29316+ val = lp->a->read_bcr(ioaddr, 2);
29317+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29318+ val = lp->a->read_bcr(ioaddr, 32);
29319+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29320
29321 if (!(lp->options & PCNET32_PORT_ASEL)) {
29322 /* setup ecmd */
29323@@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29324 ecmd.speed =
29325 lp->
29326 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29327- bcr9 = lp->a.read_bcr(ioaddr, 9);
29328+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29329
29330 if (lp->options & PCNET32_PORT_FD) {
29331 ecmd.duplex = DUPLEX_FULL;
29332@@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29333 ecmd.duplex = DUPLEX_HALF;
29334 bcr9 |= ~(1 << 0);
29335 }
29336- lp->a.write_bcr(ioaddr, 9, bcr9);
29337+ lp->a->write_bcr(ioaddr, 9, bcr9);
29338 }
29339
29340 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29341@@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29342
29343 #ifdef DO_DXSUFLO
29344 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29345- val = lp->a.read_csr(ioaddr, CSR3);
29346+ val = lp->a->read_csr(ioaddr, CSR3);
29347 val |= 0x40;
29348- lp->a.write_csr(ioaddr, CSR3, val);
29349+ lp->a->write_csr(ioaddr, CSR3, val);
29350 }
29351 #endif
29352
29353@@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29354 napi_enable(&lp->napi);
29355
29356 /* Re-initialize the PCNET32, and start it when done. */
29357- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29358- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29359+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29360+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29361
29362- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29363- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29364+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29365+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29366
29367 netif_start_queue(dev);
29368
29369@@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29370
29371 i = 0;
29372 while (i++ < 100)
29373- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29374+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29375 break;
29376 /*
29377 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29378 * reports that doing so triggers a bug in the '974.
29379 */
29380- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29381+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29382
29383 netif_printk(lp, ifup, KERN_DEBUG, dev,
29384 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29385 i,
29386 (u32) (lp->init_dma_addr),
29387- lp->a.read_csr(ioaddr, CSR0));
29388+ lp->a->read_csr(ioaddr, CSR0));
29389
29390 spin_unlock_irqrestore(&lp->lock, flags);
29391
29392@@ -2236,7 +2236,7 @@ err_free_ring:
29393 * Switch back to 16bit mode to avoid problems with dumb
29394 * DOS packet driver after a warm reboot
29395 */
29396- lp->a.write_bcr(ioaddr, 20, 4);
29397+ lp->a->write_bcr(ioaddr, 20, 4);
29398
29399 err_free_irq:
29400 spin_unlock_irqrestore(&lp->lock, flags);
29401@@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29402
29403 /* wait for stop */
29404 for (i = 0; i < 100; i++)
29405- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29406+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29407 break;
29408
29409 if (i >= 100)
29410@@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29411 return;
29412
29413 /* ReInit Ring */
29414- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29415+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29416 i = 0;
29417 while (i++ < 1000)
29418- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29419+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29420 break;
29421
29422- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29423+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29424 }
29425
29426 static void pcnet32_tx_timeout(struct net_device *dev)
29427@@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29428 /* Transmitter timeout, serious problems. */
29429 if (pcnet32_debug & NETIF_MSG_DRV)
29430 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29431- dev->name, lp->a.read_csr(ioaddr, CSR0));
29432- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29433+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29434+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29435 dev->stats.tx_errors++;
29436 if (netif_msg_tx_err(lp)) {
29437 int i;
29438@@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29439
29440 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29441 "%s() called, csr0 %4.4x\n",
29442- __func__, lp->a.read_csr(ioaddr, CSR0));
29443+ __func__, lp->a->read_csr(ioaddr, CSR0));
29444
29445 /* Default status -- will not enable Successful-TxDone
29446 * interrupt when that option is available to us.
29447@@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29448 dev->stats.tx_bytes += skb->len;
29449
29450 /* Trigger an immediate send poll. */
29451- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29452+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29453
29454 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29455 lp->tx_full = 1;
29456@@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29457
29458 spin_lock(&lp->lock);
29459
29460- csr0 = lp->a.read_csr(ioaddr, CSR0);
29461+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29462 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29463 if (csr0 == 0xffff)
29464 break; /* PCMCIA remove happened */
29465 /* Acknowledge all of the current interrupt sources ASAP. */
29466- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29467+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29468
29469 netif_printk(lp, intr, KERN_DEBUG, dev,
29470 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29471- csr0, lp->a.read_csr(ioaddr, CSR0));
29472+ csr0, lp->a->read_csr(ioaddr, CSR0));
29473
29474 /* Log misc errors. */
29475 if (csr0 & 0x4000)
29476@@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29477 if (napi_schedule_prep(&lp->napi)) {
29478 u16 val;
29479 /* set interrupt masks */
29480- val = lp->a.read_csr(ioaddr, CSR3);
29481+ val = lp->a->read_csr(ioaddr, CSR3);
29482 val |= 0x5f00;
29483- lp->a.write_csr(ioaddr, CSR3, val);
29484+ lp->a->write_csr(ioaddr, CSR3, val);
29485
29486 __napi_schedule(&lp->napi);
29487 break;
29488 }
29489- csr0 = lp->a.read_csr(ioaddr, CSR0);
29490+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29491 }
29492
29493 netif_printk(lp, intr, KERN_DEBUG, dev,
29494 "exiting interrupt, csr0=%#4.4x\n",
29495- lp->a.read_csr(ioaddr, CSR0));
29496+ lp->a->read_csr(ioaddr, CSR0));
29497
29498 spin_unlock(&lp->lock);
29499
29500@@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29501
29502 spin_lock_irqsave(&lp->lock, flags);
29503
29504- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29505+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29506
29507 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29508 "Shutting down ethercard, status was %2.2x\n",
29509- lp->a.read_csr(ioaddr, CSR0));
29510+ lp->a->read_csr(ioaddr, CSR0));
29511
29512 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29513- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29514+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29515
29516 /*
29517 * Switch back to 16bit mode to avoid problems with dumb
29518 * DOS packet driver after a warm reboot
29519 */
29520- lp->a.write_bcr(ioaddr, 20, 4);
29521+ lp->a->write_bcr(ioaddr, 20, 4);
29522
29523 spin_unlock_irqrestore(&lp->lock, flags);
29524
29525@@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29526 unsigned long flags;
29527
29528 spin_lock_irqsave(&lp->lock, flags);
29529- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29530+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29531 spin_unlock_irqrestore(&lp->lock, flags);
29532
29533 return &dev->stats;
29534@@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29535 if (dev->flags & IFF_ALLMULTI) {
29536 ib->filter[0] = cpu_to_le32(~0U);
29537 ib->filter[1] = cpu_to_le32(~0U);
29538- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29539- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29540- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29541- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29542+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29543+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29544+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29545+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29546 return;
29547 }
29548 /* clear the multicast filter */
29549@@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29550 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29551 }
29552 for (i = 0; i < 4; i++)
29553- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29554+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29555 le16_to_cpu(mcast_table[i]));
29556 }
29557
29558@@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29559
29560 spin_lock_irqsave(&lp->lock, flags);
29561 suspended = pcnet32_suspend(dev, &flags, 0);
29562- csr15 = lp->a.read_csr(ioaddr, CSR15);
29563+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29564 if (dev->flags & IFF_PROMISC) {
29565 /* Log any net taps. */
29566 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29567 lp->init_block->mode =
29568 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29569 7);
29570- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29571+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29572 } else {
29573 lp->init_block->mode =
29574 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29575- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29576+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29577 pcnet32_load_multicast(dev);
29578 }
29579
29580 if (suspended) {
29581 int csr5;
29582 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29583- csr5 = lp->a.read_csr(ioaddr, CSR5);
29584- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29585+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29586+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29587 } else {
29588- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29589+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29590 pcnet32_restart(dev, CSR0_NORMAL);
29591 netif_wake_queue(dev);
29592 }
29593@@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29594 if (!lp->mii)
29595 return 0;
29596
29597- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29598- val_out = lp->a.read_bcr(ioaddr, 34);
29599+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29600+ val_out = lp->a->read_bcr(ioaddr, 34);
29601
29602 return val_out;
29603 }
29604@@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29605 if (!lp->mii)
29606 return;
29607
29608- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29609- lp->a.write_bcr(ioaddr, 34, val);
29610+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29611+ lp->a->write_bcr(ioaddr, 34, val);
29612 }
29613
29614 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29615@@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29616 curr_link = mii_link_ok(&lp->mii_if);
29617 } else {
29618 ulong ioaddr = dev->base_addr; /* card base I/O address */
29619- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29620+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29621 }
29622 if (!curr_link) {
29623 if (prev_link || verbose) {
29624@@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29625 (ecmd.duplex == DUPLEX_FULL)
29626 ? "full" : "half");
29627 }
29628- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29629+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29630 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29631 if (lp->mii_if.full_duplex)
29632 bcr9 |= (1 << 0);
29633 else
29634 bcr9 &= ~(1 << 0);
29635- lp->a.write_bcr(dev->base_addr, 9, bcr9);
29636+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
29637 }
29638 } else {
29639 netif_info(lp, link, dev, "link up\n");
29640diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29641--- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29642+++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29643@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29644 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29645 struct ppp_stats stats;
29646 struct ppp_comp_stats cstats;
29647- char *vers;
29648
29649 switch (cmd) {
29650 case SIOCGPPPSTATS:
29651@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29652 break;
29653
29654 case SIOCGPPPVER:
29655- vers = PPP_VERSION;
29656- if (copy_to_user(addr, vers, strlen(vers) + 1))
29657+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29658 break;
29659 err = 0;
29660 break;
29661diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29662--- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29663+++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29664@@ -552,12 +552,12 @@ struct rtl8169_private {
29665 struct mdio_ops {
29666 void (*write)(void __iomem *, int, int);
29667 int (*read)(void __iomem *, int);
29668- } mdio_ops;
29669+ } __no_const mdio_ops;
29670
29671 struct pll_power_ops {
29672 void (*down)(struct rtl8169_private *);
29673 void (*up)(struct rtl8169_private *);
29674- } pll_power_ops;
29675+ } __no_const pll_power_ops;
29676
29677 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29678 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29679diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29680--- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29681+++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29682@@ -131,6 +131,7 @@
29683 #define CHIPREV_ID_5750_A0 0x4000
29684 #define CHIPREV_ID_5750_A1 0x4001
29685 #define CHIPREV_ID_5750_A3 0x4003
29686+#define CHIPREV_ID_5750_C1 0x4201
29687 #define CHIPREV_ID_5750_C2 0x4202
29688 #define CHIPREV_ID_5752_A0_HW 0x5000
29689 #define CHIPREV_ID_5752_A0 0x6000
29690diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29691--- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29692+++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29693@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29694
29695 static int __init abyss_init (void)
29696 {
29697- abyss_netdev_ops = tms380tr_netdev_ops;
29698+ pax_open_kernel();
29699+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29700
29701- abyss_netdev_ops.ndo_open = abyss_open;
29702- abyss_netdev_ops.ndo_stop = abyss_close;
29703+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29704+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29705+ pax_close_kernel();
29706
29707 return pci_register_driver(&abyss_driver);
29708 }
29709diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29710--- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29711+++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29712@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29713
29714 static int __init madgemc_init (void)
29715 {
29716- madgemc_netdev_ops = tms380tr_netdev_ops;
29717- madgemc_netdev_ops.ndo_open = madgemc_open;
29718- madgemc_netdev_ops.ndo_stop = madgemc_close;
29719+ pax_open_kernel();
29720+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29721+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29722+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29723+ pax_close_kernel();
29724
29725 return mca_register_driver (&madgemc_driver);
29726 }
29727diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29728--- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29729+++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29730@@ -353,9 +353,11 @@ static int __init proteon_init(void)
29731 struct platform_device *pdev;
29732 int i, num = 0, err = 0;
29733
29734- proteon_netdev_ops = tms380tr_netdev_ops;
29735- proteon_netdev_ops.ndo_open = proteon_open;
29736- proteon_netdev_ops.ndo_stop = tms380tr_close;
29737+ pax_open_kernel();
29738+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29739+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29740+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29741+ pax_close_kernel();
29742
29743 err = platform_driver_register(&proteon_driver);
29744 if (err)
29745diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29746--- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29747+++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29748@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29749 struct platform_device *pdev;
29750 int i, num = 0, err = 0;
29751
29752- sk_isa_netdev_ops = tms380tr_netdev_ops;
29753- sk_isa_netdev_ops.ndo_open = sk_isa_open;
29754- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29755+ pax_open_kernel();
29756+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29757+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29758+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29759+ pax_close_kernel();
29760
29761 err = platform_driver_register(&sk_isa_driver);
29762 if (err)
29763diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29764--- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29765+++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29766@@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29767 struct de_srom_info_leaf *il;
29768 void *bufp;
29769
29770+ pax_track_stack();
29771+
29772 /* download entire eeprom */
29773 for (i = 0; i < DE_EEPROM_WORDS; i++)
29774 ((__le16 *)ee_data)[i] =
29775diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29776--- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29777+++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29778@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29779 for (i=0; i<ETH_ALEN; i++) {
29780 tmp.addr[i] = dev->dev_addr[i];
29781 }
29782- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29783+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29784 break;
29785
29786 case DE4X5_SET_HWADDR: /* Set the hardware address */
29787@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29788 spin_lock_irqsave(&lp->lock, flags);
29789 memcpy(&statbuf, &lp->pktStats, ioc->len);
29790 spin_unlock_irqrestore(&lp->lock, flags);
29791- if (copy_to_user(ioc->data, &statbuf, ioc->len))
29792+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29793 return -EFAULT;
29794 break;
29795 }
29796diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29797--- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29798+++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29799@@ -71,7 +71,7 @@
29800 #include <asm/byteorder.h>
29801 #include <linux/serial_core.h>
29802 #include <linux/serial.h>
29803-
29804+#include <asm/local.h>
29805
29806 #define MOD_AUTHOR "Option Wireless"
29807 #define MOD_DESCRIPTION "USB High Speed Option driver"
29808@@ -257,7 +257,7 @@ struct hso_serial {
29809
29810 /* from usb_serial_port */
29811 struct tty_struct *tty;
29812- int open_count;
29813+ local_t open_count;
29814 spinlock_t serial_lock;
29815
29816 int (*write_data) (struct hso_serial *serial);
29817@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29818 struct urb *urb;
29819
29820 urb = serial->rx_urb[0];
29821- if (serial->open_count > 0) {
29822+ if (local_read(&serial->open_count) > 0) {
29823 count = put_rxbuf_data(urb, serial);
29824 if (count == -1)
29825 return;
29826@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29827 DUMP1(urb->transfer_buffer, urb->actual_length);
29828
29829 /* Anyone listening? */
29830- if (serial->open_count == 0)
29831+ if (local_read(&serial->open_count) == 0)
29832 return;
29833
29834 if (status == 0) {
29835@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29836 spin_unlock_irq(&serial->serial_lock);
29837
29838 /* check for port already opened, if not set the termios */
29839- serial->open_count++;
29840- if (serial->open_count == 1) {
29841+ if (local_inc_return(&serial->open_count) == 1) {
29842 serial->rx_state = RX_IDLE;
29843 /* Force default termio settings */
29844 _hso_serial_set_termios(tty, NULL);
29845@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29846 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29847 if (result) {
29848 hso_stop_serial_device(serial->parent);
29849- serial->open_count--;
29850+ local_dec(&serial->open_count);
29851 kref_put(&serial->parent->ref, hso_serial_ref_free);
29852 }
29853 } else {
29854@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29855
29856 /* reset the rts and dtr */
29857 /* do the actual close */
29858- serial->open_count--;
29859+ local_dec(&serial->open_count);
29860
29861- if (serial->open_count <= 0) {
29862- serial->open_count = 0;
29863+ if (local_read(&serial->open_count) <= 0) {
29864+ local_set(&serial->open_count, 0);
29865 spin_lock_irq(&serial->serial_lock);
29866 if (serial->tty == tty) {
29867 serial->tty->driver_data = NULL;
29868@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29869
29870 /* the actual setup */
29871 spin_lock_irqsave(&serial->serial_lock, flags);
29872- if (serial->open_count)
29873+ if (local_read(&serial->open_count))
29874 _hso_serial_set_termios(tty, old);
29875 else
29876 tty->termios = old;
29877@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29878 D1("Pending read interrupt on port %d\n", i);
29879 spin_lock(&serial->serial_lock);
29880 if (serial->rx_state == RX_IDLE &&
29881- serial->open_count > 0) {
29882+ local_read(&serial->open_count) > 0) {
29883 /* Setup and send a ctrl req read on
29884 * port i */
29885 if (!serial->rx_urb_filled[0]) {
29886@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29887 /* Start all serial ports */
29888 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29889 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29890- if (dev2ser(serial_table[i])->open_count) {
29891+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
29892 result =
29893 hso_start_serial_device(serial_table[i], GFP_NOIO);
29894 hso_kick_transmit(dev2ser(serial_table[i]));
29895diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29896--- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29897+++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29898@@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29899 * Return with error code if any of the queue indices
29900 * is out of range
29901 */
29902- if (p->ring_index[i] < 0 ||
29903- p->ring_index[i] >= adapter->num_rx_queues)
29904+ if (p->ring_index[i] >= adapter->num_rx_queues)
29905 return -EINVAL;
29906 }
29907
29908diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29909--- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29910+++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29911@@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29912 void (*link_down)(struct __vxge_hw_device *devh);
29913 void (*crit_err)(struct __vxge_hw_device *devh,
29914 enum vxge_hw_event type, u64 ext_data);
29915-};
29916+} __no_const;
29917
29918 /*
29919 * struct __vxge_hw_blockpool_entry - Block private data structure
29920diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29921--- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29922+++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29923@@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29924 struct sk_buff *completed[NR_SKB_COMPLETED];
29925 int more;
29926
29927+ pax_track_stack();
29928+
29929 do {
29930 more = 0;
29931 skb_ptr = completed;
29932@@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29933 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29934 int index;
29935
29936+ pax_track_stack();
29937+
29938 /*
29939 * Filling
29940 * - itable with bucket numbers
29941diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29942--- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29943+++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29944@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29945 struct vxge_hw_mempool_dma *dma_object,
29946 u32 index,
29947 u32 is_last);
29948-};
29949+} __no_const;
29950
29951 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29952 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29953diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29954--- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29955+++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29956@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29957 unsigned char hex[1024],
29958 * phex = hex;
29959
29960+ pax_track_stack();
29961+
29962 if (len >= (sizeof(hex) / 2))
29963 len = (sizeof(hex) / 2) - 1;
29964
29965diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
29966--- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
29967+++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
29968@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29969
29970 static int x25_open(struct net_device *dev)
29971 {
29972- struct lapb_register_struct cb;
29973+ static struct lapb_register_struct cb = {
29974+ .connect_confirmation = x25_connected,
29975+ .connect_indication = x25_connected,
29976+ .disconnect_confirmation = x25_disconnected,
29977+ .disconnect_indication = x25_disconnected,
29978+ .data_indication = x25_data_indication,
29979+ .data_transmit = x25_data_transmit
29980+ };
29981 int result;
29982
29983- cb.connect_confirmation = x25_connected;
29984- cb.connect_indication = x25_connected;
29985- cb.disconnect_confirmation = x25_disconnected;
29986- cb.disconnect_indication = x25_disconnected;
29987- cb.data_indication = x25_data_indication;
29988- cb.data_transmit = x25_data_transmit;
29989-
29990 result = lapb_register(dev, &cb);
29991 if (result != LAPB_OK)
29992 return result;
29993diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
29994--- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
29995+++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
29996@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29997 int do_autopm = 1;
29998 DECLARE_COMPLETION_ONSTACK(notif_completion);
29999
30000+ pax_track_stack();
30001+
30002 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30003 i2400m, ack, ack_size);
30004 BUG_ON(_ack == i2400m->bm_ack_buf);
30005diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30006--- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30007+++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30008@@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30009 BSSListElement * loop_net;
30010 BSSListElement * tmp_net;
30011
30012+ pax_track_stack();
30013+
30014 /* Blow away current list of scan results */
30015 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30016 list_move_tail (&loop_net->list, &ai->network_free_list);
30017@@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30018 WepKeyRid wkr;
30019 int rc;
30020
30021+ pax_track_stack();
30022+
30023 memset( &mySsid, 0, sizeof( mySsid ) );
30024 kfree (ai->flash);
30025 ai->flash = NULL;
30026@@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30027 __le32 *vals = stats.vals;
30028 int len;
30029
30030+ pax_track_stack();
30031+
30032 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30033 return -ENOMEM;
30034 data = file->private_data;
30035@@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30036 /* If doLoseSync is not 1, we won't do a Lose Sync */
30037 int doLoseSync = -1;
30038
30039+ pax_track_stack();
30040+
30041 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30042 return -ENOMEM;
30043 data = file->private_data;
30044@@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30045 int i;
30046 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30047
30048+ pax_track_stack();
30049+
30050 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30051 if (!qual)
30052 return -ENOMEM;
30053@@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30054 CapabilityRid cap_rid;
30055 __le32 *vals = stats_rid.vals;
30056
30057+ pax_track_stack();
30058+
30059 /* Get stats out of the card */
30060 clear_bit(JOB_WSTATS, &local->jobs);
30061 if (local->power.event) {
30062diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30063--- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30064+++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30065@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30066 unsigned int v;
30067 u64 tsf;
30068
30069+ pax_track_stack();
30070+
30071 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30072 len += snprintf(buf+len, sizeof(buf)-len,
30073 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30074@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30075 unsigned int len = 0;
30076 unsigned int i;
30077
30078+ pax_track_stack();
30079+
30080 len += snprintf(buf+len, sizeof(buf)-len,
30081 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30082
30083@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30084 unsigned int i;
30085 unsigned int v;
30086
30087+ pax_track_stack();
30088+
30089 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30090 sc->ah->ah_ant_mode);
30091 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30092@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30093 unsigned int len = 0;
30094 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30095
30096+ pax_track_stack();
30097+
30098 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30099 sc->bssidmask);
30100 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30101@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30102 unsigned int len = 0;
30103 int i;
30104
30105+ pax_track_stack();
30106+
30107 len += snprintf(buf+len, sizeof(buf)-len,
30108 "RX\n---------------------\n");
30109 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30110@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30111 char buf[700];
30112 unsigned int len = 0;
30113
30114+ pax_track_stack();
30115+
30116 len += snprintf(buf+len, sizeof(buf)-len,
30117 "HW has PHY error counters:\t%s\n",
30118 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30119@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30120 struct ath5k_buf *bf, *bf0;
30121 int i, n;
30122
30123+ pax_track_stack();
30124+
30125 len += snprintf(buf+len, sizeof(buf)-len,
30126 "available txbuffers: %d\n", sc->txbuf_len);
30127
30128diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30129--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30130+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30131@@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30132 s32 i, j, ip, im, nmeasurement;
30133 u8 nchains = get_streams(common->tx_chainmask);
30134
30135+ pax_track_stack();
30136+
30137 for (ip = 0; ip < MPASS; ip++) {
30138 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30139 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30140@@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30141 int i, ip, im, j;
30142 int nmeasurement;
30143
30144+ pax_track_stack();
30145+
30146 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30147 if (ah->txchainmask & (1 << i))
30148 num_chains++;
30149diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30150--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30151+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30152@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30153 int theta_low_bin = 0;
30154 int i;
30155
30156+ pax_track_stack();
30157+
30158 /* disregard any bin that contains <= 16 samples */
30159 thresh_accum_cnt = 16;
30160 scale_factor = 5;
30161diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30162--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30163+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30164@@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30165 char buf[512];
30166 unsigned int len = 0;
30167
30168+ pax_track_stack();
30169+
30170 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30171 len += snprintf(buf + len, sizeof(buf) - len,
30172 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30173@@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30174 u8 addr[ETH_ALEN];
30175 u32 tmp;
30176
30177+ pax_track_stack();
30178+
30179 len += snprintf(buf + len, sizeof(buf) - len,
30180 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30181 wiphy_name(sc->hw->wiphy),
30182diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30183--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30184+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30185@@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30186 unsigned int len = 0;
30187 int ret = 0;
30188
30189+ pax_track_stack();
30190+
30191 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30192
30193 WMI_CMD(WMI_TGT_STATS_CMDID);
30194@@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30195 char buf[512];
30196 unsigned int len = 0;
30197
30198+ pax_track_stack();
30199+
30200 len += snprintf(buf + len, sizeof(buf) - len,
30201 "%20s : %10u\n", "Buffers queued",
30202 priv->debug.tx_stats.buf_queued);
30203@@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30204 char buf[512];
30205 unsigned int len = 0;
30206
30207+ pax_track_stack();
30208+
30209 len += snprintf(buf + len, sizeof(buf) - len,
30210 "%20s : %10u\n", "SKBs allocated",
30211 priv->debug.rx_stats.skb_allocated);
30212diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30213--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30214+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30215@@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30216
30217 /* ANI */
30218 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30219-};
30220+} __no_const;
30221
30222 /**
30223 * struct ath_hw_ops - callbacks used by hardware code and driver code
30224@@ -642,7 +642,7 @@ struct ath_hw_ops {
30225 u32 burstDuration);
30226 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30227 u32 vmf);
30228-};
30229+} __no_const;
30230
30231 struct ath_nf_limits {
30232 s16 max;
30233diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30234--- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30235+++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30236@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30237 int err;
30238 DECLARE_SSID_BUF(ssid);
30239
30240+ pax_track_stack();
30241+
30242 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30243
30244 if (ssid_len)
30245@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30246 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30247 int err;
30248
30249+ pax_track_stack();
30250+
30251 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30252 idx, keylen, len);
30253
30254diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30255--- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30256+++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30257@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30258 unsigned long flags;
30259 DECLARE_SSID_BUF(ssid);
30260
30261+ pax_track_stack();
30262+
30263 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30264 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30265 print_ssid(ssid, info_element->data, info_element->len),
30266diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30267--- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30268+++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30269@@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30270 */
30271 if (iwl3945_mod_params.disable_hw_scan) {
30272 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30273- iwl3945_hw_ops.hw_scan = NULL;
30274+ pax_open_kernel();
30275+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30276+ pax_close_kernel();
30277 }
30278
30279 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30280diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30281--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30282+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30283@@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30284 if (cfg->mod_params->disable_hw_scan) {
30285 dev_printk(KERN_DEBUG, &(pdev->dev),
30286 "sw scan support is deprecated\n");
30287- iwlagn_hw_ops.hw_scan = NULL;
30288+ pax_open_kernel();
30289+ *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30290+ pax_close_kernel();
30291 }
30292
30293 hw = iwl_alloc_all(cfg);
30294diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30295--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30296+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30297@@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30298 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30299 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30300
30301+ pax_track_stack();
30302+
30303 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30304
30305 /* Treat uninitialized rate scaling data same as non-existing. */
30306@@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30307 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30308 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30309
30310+ pax_track_stack();
30311+
30312 /* Override starting rate (index 0) if needed for debug purposes */
30313 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30314
30315diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30316--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30317+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30318@@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30319 int pos = 0;
30320 const size_t bufsz = sizeof(buf);
30321
30322+ pax_track_stack();
30323+
30324 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30325 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30326 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30327@@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30328 char buf[256 * NUM_IWL_RXON_CTX];
30329 const size_t bufsz = sizeof(buf);
30330
30331+ pax_track_stack();
30332+
30333 for_each_context(priv, ctx) {
30334 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30335 ctx->ctxid);
30336diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30337--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30338+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30339@@ -68,8 +68,8 @@ do {
30340 } while (0)
30341
30342 #else
30343-#define IWL_DEBUG(__priv, level, fmt, args...)
30344-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30345+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30346+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30347 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30348 const void *p, u32 len)
30349 {}
30350diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30351--- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30352+++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30353@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30354 int buf_len = 512;
30355 size_t len = 0;
30356
30357+ pax_track_stack();
30358+
30359 if (*ppos != 0)
30360 return 0;
30361 if (count < sizeof(buf))
30362diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30363--- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30364+++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30365@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30366 return -EINVAL;
30367
30368 if (fake_hw_scan) {
30369- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30370- mac80211_hwsim_ops.sw_scan_start = NULL;
30371- mac80211_hwsim_ops.sw_scan_complete = NULL;
30372+ pax_open_kernel();
30373+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30374+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30375+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30376+ pax_close_kernel();
30377 }
30378
30379 spin_lock_init(&hwsim_radio_lock);
30380diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30381--- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30382+++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30383@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30384
30385 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30386
30387- if (rts_threshold < 0 || rts_threshold > 2347)
30388+ if (rts_threshold > 2347)
30389 rts_threshold = 2347;
30390
30391 tmp = cpu_to_le32(rts_threshold);
30392diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30393--- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30394+++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30395@@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30396 u8 rfpath;
30397 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30398
30399+ pax_track_stack();
30400+
30401 precommoncmdcnt = 0;
30402 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30403 MAX_PRECMD_CNT,
30404diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30405--- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30406+++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30407@@ -260,7 +260,7 @@ struct wl1251_if_operations {
30408 void (*reset)(struct wl1251 *wl);
30409 void (*enable_irq)(struct wl1251 *wl);
30410 void (*disable_irq)(struct wl1251 *wl);
30411-};
30412+} __no_const;
30413
30414 struct wl1251 {
30415 struct ieee80211_hw *hw;
30416diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30417--- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30418+++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30419@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30420 u32 chunk_len;
30421 int i;
30422
30423+ pax_track_stack();
30424+
30425 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30426
30427 spi_message_init(&m);
30428diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30429--- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30430+++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30431@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30432 if (cookie == NO_COOKIE)
30433 offset = pc;
30434 if (cookie == INVALID_COOKIE) {
30435- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30436+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30437 offset = pc;
30438 }
30439 if (cookie != last_cookie) {
30440@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30441 /* add userspace sample */
30442
30443 if (!mm) {
30444- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30445+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30446 return 0;
30447 }
30448
30449 cookie = lookup_dcookie(mm, s->eip, &offset);
30450
30451 if (cookie == INVALID_COOKIE) {
30452- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30453+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30454 return 0;
30455 }
30456
30457@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30458 /* ignore backtraces if failed to add a sample */
30459 if (state == sb_bt_start) {
30460 state = sb_bt_ignore;
30461- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30462+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30463 }
30464 }
30465 release_mm(mm);
30466diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30467--- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30468+++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30469@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30470 }
30471
30472 if (buffer_pos == buffer_size) {
30473- atomic_inc(&oprofile_stats.event_lost_overflow);
30474+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30475 return;
30476 }
30477
30478diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30479--- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30480+++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30481@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30482 if (oprofile_ops.switch_events())
30483 return;
30484
30485- atomic_inc(&oprofile_stats.multiplex_counter);
30486+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30487 start_switch_worker();
30488 }
30489
30490diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30491--- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30492+++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30493@@ -186,7 +186,7 @@ static const struct file_operations atom
30494
30495
30496 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30497- char const *name, atomic_t *val)
30498+ char const *name, atomic_unchecked_t *val)
30499 {
30500 return __oprofilefs_create_file(sb, root, name,
30501 &atomic_ro_fops, 0444, val);
30502diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30503--- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30504+++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30505@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30506 cpu_buf->sample_invalid_eip = 0;
30507 }
30508
30509- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30510- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30511- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30512- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30513- atomic_set(&oprofile_stats.multiplex_counter, 0);
30514+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30515+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30516+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30517+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30518+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30519 }
30520
30521
30522diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30523--- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30524+++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30525@@ -13,11 +13,11 @@
30526 #include <asm/atomic.h>
30527
30528 struct oprofile_stat_struct {
30529- atomic_t sample_lost_no_mm;
30530- atomic_t sample_lost_no_mapping;
30531- atomic_t bt_lost_no_mapping;
30532- atomic_t event_lost_overflow;
30533- atomic_t multiplex_counter;
30534+ atomic_unchecked_t sample_lost_no_mm;
30535+ atomic_unchecked_t sample_lost_no_mapping;
30536+ atomic_unchecked_t bt_lost_no_mapping;
30537+ atomic_unchecked_t event_lost_overflow;
30538+ atomic_unchecked_t multiplex_counter;
30539 };
30540
30541 extern struct oprofile_stat_struct oprofile_stats;
30542diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30543--- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30544+++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30545@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30546
30547 *ppos += len;
30548
30549- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30550+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30551 }
30552
30553 #ifdef CONFIG_PARPORT_1284
30554@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30555
30556 *ppos += len;
30557
30558- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30559+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30560 }
30561 #endif /* IEEE1284.3 support. */
30562
30563diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30564--- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30565+++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30566@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30567 int (*hardware_test) (struct slot* slot, u32 value);
30568 u8 (*get_power) (struct slot* slot);
30569 int (*set_power) (struct slot* slot, int value);
30570-};
30571+} __no_const;
30572
30573 struct cpci_hp_controller {
30574 unsigned int irq;
30575diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30576--- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30577+++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30578@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30579
30580 void compaq_nvram_init (void __iomem *rom_start)
30581 {
30582+
30583+#ifndef CONFIG_PAX_KERNEXEC
30584 if (rom_start) {
30585 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30586 }
30587+#endif
30588+
30589 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30590
30591 /* initialize our int15 lock */
30592diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30593--- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30594+++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30595@@ -27,9 +27,9 @@
30596 #define MODULE_PARAM_PREFIX "pcie_aspm."
30597
30598 /* Note: those are not register definitions */
30599-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30600-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30601-#define ASPM_STATE_L1 (4) /* L1 state */
30602+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30603+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30604+#define ASPM_STATE_L1 (4U) /* L1 state */
30605 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30606 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30607
30608diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30609--- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30610+++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30611@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30612 return ret;
30613 }
30614
30615-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30616+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30617 struct device_attribute *attr,
30618 char *buf)
30619 {
30620 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30621 }
30622
30623-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30624+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30625 struct device_attribute *attr,
30626 char *buf)
30627 {
30628@@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30629 u32 l, sz, mask;
30630 u16 orig_cmd;
30631
30632- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30633+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30634
30635 if (!dev->mmio_always_on) {
30636 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30637diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30638--- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30639+++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30640@@ -476,7 +476,16 @@ static const struct file_operations proc
30641 static int __init pci_proc_init(void)
30642 {
30643 struct pci_dev *dev = NULL;
30644+
30645+#ifdef CONFIG_GRKERNSEC_PROC_ADD
30646+#ifdef CONFIG_GRKERNSEC_PROC_USER
30647+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30648+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30649+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30650+#endif
30651+#else
30652 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30653+#endif
30654 proc_create("devices", 0, proc_bus_pci_dir,
30655 &proc_bus_pci_dev_operations);
30656 proc_initialized = 1;
30657diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30658--- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30659+++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30660@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30661 struct pcifront_sd *sd = bus->sysdata;
30662 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30663
30664+ pax_track_stack();
30665+
30666 if (verbose_request)
30667 dev_info(&pdev->xdev->dev,
30668 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30669@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30670 struct pcifront_sd *sd = bus->sysdata;
30671 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30672
30673+ pax_track_stack();
30674+
30675 if (verbose_request)
30676 dev_info(&pdev->xdev->dev,
30677 "write dev=%04x:%02x:%02x.%01x - "
30678@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30679 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30680 struct msi_desc *entry;
30681
30682+ pax_track_stack();
30683+
30684 if (nvec > SH_INFO_MAX_VEC) {
30685 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30686 " Increase SH_INFO_MAX_VEC.\n", nvec);
30687@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30688 struct pcifront_sd *sd = dev->bus->sysdata;
30689 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30690
30691+ pax_track_stack();
30692+
30693 err = do_pci_op(pdev, &op);
30694
30695 /* What should do for error ? */
30696@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30697 struct pcifront_sd *sd = dev->bus->sysdata;
30698 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30699
30700+ pax_track_stack();
30701+
30702 err = do_pci_op(pdev, &op);
30703 if (likely(!err)) {
30704 vector[0] = op.value;
30705diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30706--- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30707+++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30708@@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30709 return 0;
30710 }
30711
30712-void static hotkey_mask_warn_incomplete_mask(void)
30713+static void hotkey_mask_warn_incomplete_mask(void)
30714 {
30715 /* log only what the user can fix... */
30716 const u32 wantedmask = hotkey_driver_mask &
30717diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30718--- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30719+++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30720@@ -59,7 +59,7 @@ do { \
30721 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30722 } while(0)
30723
30724-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30725+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30726 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30727
30728 /*
30729@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30730
30731 cpu = get_cpu();
30732 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30733+
30734+ pax_open_kernel();
30735 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30736+ pax_close_kernel();
30737
30738 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30739 spin_lock_irqsave(&pnp_bios_lock, flags);
30740@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30741 :"memory");
30742 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30743
30744+ pax_open_kernel();
30745 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30746+ pax_close_kernel();
30747+
30748 put_cpu();
30749
30750 /* If we get here and this is set then the PnP BIOS faulted on us. */
30751@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30752 return status;
30753 }
30754
30755-void pnpbios_calls_init(union pnp_bios_install_struct *header)
30756+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30757 {
30758 int i;
30759
30760@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30761 pnp_bios_callpoint.offset = header->fields.pm16offset;
30762 pnp_bios_callpoint.segment = PNP_CS16;
30763
30764+ pax_open_kernel();
30765+
30766 for_each_possible_cpu(i) {
30767 struct desc_struct *gdt = get_cpu_gdt_table(i);
30768 if (!gdt)
30769@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30770 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30771 (unsigned long)__va(header->fields.pm16dseg));
30772 }
30773+
30774+ pax_close_kernel();
30775 }
30776diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30777--- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30778+++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30779@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30780 return 1;
30781
30782 /* check if the resource is valid */
30783- if (*irq < 0 || *irq > 15)
30784+ if (*irq > 15)
30785 return 0;
30786
30787 /* check if the resource is reserved */
30788@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30789 return 1;
30790
30791 /* check if the resource is valid */
30792- if (*dma < 0 || *dma == 4 || *dma > 7)
30793+ if (*dma == 4 || *dma > 7)
30794 return 0;
30795
30796 /* check if the resource is reserved */
30797diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30798--- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30799+++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30800@@ -66,7 +66,7 @@
30801 struct bq27x00_device_info;
30802 struct bq27x00_access_methods {
30803 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30804-};
30805+} __no_const;
30806
30807 enum bq27x00_chip { BQ27000, BQ27500 };
30808
30809diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30810--- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30811+++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30812@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30813 max8660->shadow_regs[MAX8660_OVER1] = 5;
30814 } else {
30815 /* Otherwise devices can be toggled via software */
30816- max8660_dcdc_ops.enable = max8660_dcdc_enable;
30817- max8660_dcdc_ops.disable = max8660_dcdc_disable;
30818+ pax_open_kernel();
30819+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30820+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30821+ pax_close_kernel();
30822 }
30823
30824 /*
30825diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30826--- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30827+++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30828@@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30829 }
30830 mc13xxx_unlock(mc13892);
30831
30832- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30833+ pax_open_kernel();
30834+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30835 = mc13892_vcam_set_mode;
30836- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30837+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30838 = mc13892_vcam_get_mode;
30839+ pax_close_kernel();
30840 for (i = 0; i < pdata->num_regulators; i++) {
30841 init_data = &pdata->regulators[i];
30842 priv->regulators[i] = regulator_register(
30843diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30844--- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30845+++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30846@@ -14,6 +14,7 @@
30847 #include <linux/module.h>
30848 #include <linux/rtc.h>
30849 #include <linux/sched.h>
30850+#include <linux/grsecurity.h>
30851 #include "rtc-core.h"
30852
30853 static dev_t rtc_devt;
30854@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30855 if (copy_from_user(&tm, uarg, sizeof(tm)))
30856 return -EFAULT;
30857
30858+ gr_log_timechange();
30859+
30860 return rtc_set_time(rtc, &tm);
30861
30862 case RTC_PIE_ON:
30863diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30864--- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30865+++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30866@@ -492,7 +492,7 @@ struct adapter_ops
30867 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30868 /* Administrative operations */
30869 int (*adapter_comm)(struct aac_dev * dev, int comm);
30870-};
30871+} __no_const;
30872
30873 /*
30874 * Define which interrupt handler needs to be installed
30875diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30876--- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30877+++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30878@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30879 u32 actual_fibsize64, actual_fibsize = 0;
30880 int i;
30881
30882+ pax_track_stack();
30883
30884 if (dev->in_reset) {
30885 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30886diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30887--- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30888+++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30889@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30890 flash_error_table[i].reason);
30891 }
30892
30893-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30894+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30895 asd_show_update_bios, asd_store_update_bios);
30896
30897 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30898diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30899--- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30900+++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30901@@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30902 struct bfad_vport_s *vport, *vport_new;
30903 struct bfa_fcs_driver_info_s driver_info;
30904
30905+ pax_track_stack();
30906+
30907 /* Fill the driver_info info to fcs*/
30908 memset(&driver_info, 0, sizeof(driver_info));
30909 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30910diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30911--- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30912+++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30913@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30914 u16 len, count;
30915 u16 templen;
30916
30917+ pax_track_stack();
30918+
30919 /*
30920 * get hba attributes
30921 */
30922@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30923 u8 count = 0;
30924 u16 templen;
30925
30926+ pax_track_stack();
30927+
30928 /*
30929 * get port attributes
30930 */
30931diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30932--- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30933+++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30934@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30935 struct fc_rpsc_speed_info_s speeds;
30936 struct bfa_port_attr_s pport_attr;
30937
30938+ pax_track_stack();
30939+
30940 bfa_trc(port->fcs, rx_fchs->s_id);
30941 bfa_trc(port->fcs, rx_fchs->d_id);
30942
30943diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30944--- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30945+++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30946@@ -238,7 +238,7 @@ struct bfa_hwif_s {
30947 u32 *nvecs, u32 *maxvec);
30948 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30949 u32 *end);
30950-};
30951+} __no_const;
30952 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30953
30954 struct bfa_iocfc_s {
30955diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30956--- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30957+++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30958@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30959 bfa_ioc_disable_cbfn_t disable_cbfn;
30960 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30961 bfa_ioc_reset_cbfn_t reset_cbfn;
30962-};
30963+} __no_const;
30964
30965 /*
30966 * Heartbeat failure notification queue element.
30967@@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
30968 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30969 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30970 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30971-};
30972+} __no_const;
30973
30974 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30975 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30976diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
30977--- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
30978+++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
30979@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30980 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30981 *PrototypeHostAdapter)
30982 {
30983+ pax_track_stack();
30984+
30985 /*
30986 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30987 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30988diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
30989--- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
30990+++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
30991@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30992 dma_addr_t addr;
30993 ulong flags = 0;
30994
30995+ pax_track_stack();
30996+
30997 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30998 // get user msg size in u32s
30999 if(get_user(size, &user_msg[0])){
31000@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31001 s32 rcode;
31002 dma_addr_t addr;
31003
31004+ pax_track_stack();
31005+
31006 memset(msg, 0 , sizeof(msg));
31007 len = scsi_bufflen(cmd);
31008 direction = 0x00000000;
31009diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31010--- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31011+++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31012@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31013 struct hostdata *ha;
31014 char name[16];
31015
31016+ pax_track_stack();
31017+
31018 sprintf(name, "%s%d", driver_name, j);
31019
31020 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31021diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31022--- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31023+++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31024@@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31025 } buf;
31026 int rc;
31027
31028+ pax_track_stack();
31029+
31030 fiph = (struct fip_header *)skb->data;
31031 sub = fiph->fip_subcode;
31032
31033diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31034--- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31035+++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31036@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31037 unsigned long flags;
31038 gdth_ha_str *ha;
31039
31040+ pax_track_stack();
31041+
31042 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31043 return -EFAULT;
31044 ha = gdth_find_ha(ldrv.ionode);
31045@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31046 gdth_ha_str *ha;
31047 int rval;
31048
31049+ pax_track_stack();
31050+
31051 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31052 res.number >= MAX_HDRIVES)
31053 return -EFAULT;
31054@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31055 gdth_ha_str *ha;
31056 int rval;
31057
31058+ pax_track_stack();
31059+
31060 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31061 return -EFAULT;
31062 ha = gdth_find_ha(gen.ionode);
31063@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31064 int i;
31065 gdth_cmd_str gdtcmd;
31066 char cmnd[MAX_COMMAND_SIZE];
31067+
31068+ pax_track_stack();
31069+
31070 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31071
31072 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31073diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31074--- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31075+++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31076@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31077 u64 paddr;
31078
31079 char cmnd[MAX_COMMAND_SIZE];
31080+
31081+ pax_track_stack();
31082+
31083 memset(cmnd, 0xff, 12);
31084 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31085
31086@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31087 gdth_hget_str *phg;
31088 char cmnd[MAX_COMMAND_SIZE];
31089
31090+ pax_track_stack();
31091+
31092 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31093 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31094 if (!gdtcmd || !estr)
31095diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31096--- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31097+++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31098@@ -42,7 +42,7 @@
31099 #include "scsi_logging.h"
31100
31101
31102-static atomic_t scsi_host_next_hn; /* host_no for next new host */
31103+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31104
31105
31106 static void scsi_host_cls_release(struct device *dev)
31107@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31108 * subtract one because we increment first then return, but we need to
31109 * know what the next host number was before increment
31110 */
31111- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31112+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31113 shost->dma_channel = 0xff;
31114
31115 /* These three are default values which can be overridden */
31116diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31117--- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31118+++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31119@@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31120 u32 a;
31121
31122 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31123- return h->access.command_completed(h);
31124+ return h->access->command_completed(h);
31125
31126 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31127 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31128@@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31129 while (!list_empty(&h->reqQ)) {
31130 c = list_entry(h->reqQ.next, struct CommandList, list);
31131 /* can't do anything if fifo is full */
31132- if ((h->access.fifo_full(h))) {
31133+ if ((h->access->fifo_full(h))) {
31134 dev_warn(&h->pdev->dev, "fifo full\n");
31135 break;
31136 }
31137@@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31138 h->Qdepth--;
31139
31140 /* Tell the controller execute command */
31141- h->access.submit_command(h, c);
31142+ h->access->submit_command(h, c);
31143
31144 /* Put job onto the completed Q */
31145 addQ(&h->cmpQ, c);
31146@@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31147
31148 static inline unsigned long get_next_completion(struct ctlr_info *h)
31149 {
31150- return h->access.command_completed(h);
31151+ return h->access->command_completed(h);
31152 }
31153
31154 static inline bool interrupt_pending(struct ctlr_info *h)
31155 {
31156- return h->access.intr_pending(h);
31157+ return h->access->intr_pending(h);
31158 }
31159
31160 static inline long interrupt_not_for_us(struct ctlr_info *h)
31161 {
31162- return (h->access.intr_pending(h) == 0) ||
31163+ return (h->access->intr_pending(h) == 0) ||
31164 (h->interrupts_enabled == 0);
31165 }
31166
31167@@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31168 if (prod_index < 0)
31169 return -ENODEV;
31170 h->product_name = products[prod_index].product_name;
31171- h->access = *(products[prod_index].access);
31172+ h->access = products[prod_index].access;
31173
31174 if (hpsa_board_disabled(h->pdev)) {
31175 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31176@@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31177 }
31178
31179 /* make sure the board interrupts are off */
31180- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31181+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31182
31183 if (h->msix_vector || h->msi_vector)
31184 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31185@@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31186 hpsa_scsi_setup(h);
31187
31188 /* Turn the interrupts on so we can service requests */
31189- h->access.set_intr_mask(h, HPSA_INTR_ON);
31190+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31191
31192 hpsa_put_ctlr_into_performant_mode(h);
31193 hpsa_hba_inquiry(h);
31194@@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31195 * To write all data in the battery backed cache to disks
31196 */
31197 hpsa_flush_cache(h);
31198- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31199+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31200 free_irq(h->intr[h->intr_mode], h);
31201 #ifdef CONFIG_PCI_MSI
31202 if (h->msix_vector)
31203@@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31204 return;
31205 }
31206 /* Change the access methods to the performant access methods */
31207- h->access = SA5_performant_access;
31208+ h->access = &SA5_performant_access;
31209 h->transMethod = CFGTBL_Trans_Performant;
31210 }
31211
31212diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31213--- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31214+++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31215@@ -73,7 +73,7 @@ struct ctlr_info {
31216 unsigned int msix_vector;
31217 unsigned int msi_vector;
31218 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31219- struct access_method access;
31220+ struct access_method *access;
31221
31222 /* queue and queue Info */
31223 struct list_head reqQ;
31224diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31225--- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31226+++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31227@@ -1027,7 +1027,7 @@ typedef struct {
31228 int (*intr)(struct ips_ha *);
31229 void (*enableint)(struct ips_ha *);
31230 uint32_t (*statupd)(struct ips_ha *);
31231-} ips_hw_func_t;
31232+} __no_const ips_hw_func_t;
31233
31234 typedef struct ips_ha {
31235 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31236diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31237--- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31238+++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31239@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31240 * all together if not used XXX
31241 */
31242 struct {
31243- atomic_t no_free_exch;
31244- atomic_t no_free_exch_xid;
31245- atomic_t xid_not_found;
31246- atomic_t xid_busy;
31247- atomic_t seq_not_found;
31248- atomic_t non_bls_resp;
31249+ atomic_unchecked_t no_free_exch;
31250+ atomic_unchecked_t no_free_exch_xid;
31251+ atomic_unchecked_t xid_not_found;
31252+ atomic_unchecked_t xid_busy;
31253+ atomic_unchecked_t seq_not_found;
31254+ atomic_unchecked_t non_bls_resp;
31255 } stats;
31256 };
31257
31258@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31259 /* allocate memory for exchange */
31260 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31261 if (!ep) {
31262- atomic_inc(&mp->stats.no_free_exch);
31263+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31264 goto out;
31265 }
31266 memset(ep, 0, sizeof(*ep));
31267@@ -761,7 +761,7 @@ out:
31268 return ep;
31269 err:
31270 spin_unlock_bh(&pool->lock);
31271- atomic_inc(&mp->stats.no_free_exch_xid);
31272+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31273 mempool_free(ep, mp->ep_pool);
31274 return NULL;
31275 }
31276@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31277 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31278 ep = fc_exch_find(mp, xid);
31279 if (!ep) {
31280- atomic_inc(&mp->stats.xid_not_found);
31281+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31282 reject = FC_RJT_OX_ID;
31283 goto out;
31284 }
31285@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31286 ep = fc_exch_find(mp, xid);
31287 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31288 if (ep) {
31289- atomic_inc(&mp->stats.xid_busy);
31290+ atomic_inc_unchecked(&mp->stats.xid_busy);
31291 reject = FC_RJT_RX_ID;
31292 goto rel;
31293 }
31294@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31295 }
31296 xid = ep->xid; /* get our XID */
31297 } else if (!ep) {
31298- atomic_inc(&mp->stats.xid_not_found);
31299+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31300 reject = FC_RJT_RX_ID; /* XID not found */
31301 goto out;
31302 }
31303@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31304 } else {
31305 sp = &ep->seq;
31306 if (sp->id != fh->fh_seq_id) {
31307- atomic_inc(&mp->stats.seq_not_found);
31308+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31309 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31310 goto rel;
31311 }
31312@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31313
31314 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31315 if (!ep) {
31316- atomic_inc(&mp->stats.xid_not_found);
31317+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31318 goto out;
31319 }
31320 if (ep->esb_stat & ESB_ST_COMPLETE) {
31321- atomic_inc(&mp->stats.xid_not_found);
31322+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31323 goto rel;
31324 }
31325 if (ep->rxid == FC_XID_UNKNOWN)
31326 ep->rxid = ntohs(fh->fh_rx_id);
31327 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31328- atomic_inc(&mp->stats.xid_not_found);
31329+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31330 goto rel;
31331 }
31332 if (ep->did != ntoh24(fh->fh_s_id) &&
31333 ep->did != FC_FID_FLOGI) {
31334- atomic_inc(&mp->stats.xid_not_found);
31335+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31336 goto rel;
31337 }
31338 sof = fr_sof(fp);
31339@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31340 sp->ssb_stat |= SSB_ST_RESP;
31341 sp->id = fh->fh_seq_id;
31342 } else if (sp->id != fh->fh_seq_id) {
31343- atomic_inc(&mp->stats.seq_not_found);
31344+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31345 goto rel;
31346 }
31347
31348@@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31349 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31350
31351 if (!sp)
31352- atomic_inc(&mp->stats.xid_not_found);
31353+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31354 else
31355- atomic_inc(&mp->stats.non_bls_resp);
31356+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31357
31358 fc_frame_free(fp);
31359 }
31360diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31361--- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31362+++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31363@@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31364 .postreset = ata_std_postreset,
31365 .error_handler = ata_std_error_handler,
31366 .post_internal_cmd = sas_ata_post_internal,
31367- .qc_defer = ata_std_qc_defer,
31368+ .qc_defer = ata_std_qc_defer,
31369 .qc_prep = ata_noop_qc_prep,
31370 .qc_issue = sas_ata_qc_issue,
31371 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31372diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31373--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31374+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31375@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31376
31377 #include <linux/debugfs.h>
31378
31379-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31380+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31381 static unsigned long lpfc_debugfs_start_time = 0L;
31382
31383 /* iDiag */
31384@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31385 lpfc_debugfs_enable = 0;
31386
31387 len = 0;
31388- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31389+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31390 (lpfc_debugfs_max_disc_trc - 1);
31391 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31392 dtp = vport->disc_trc + i;
31393@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31394 lpfc_debugfs_enable = 0;
31395
31396 len = 0;
31397- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31398+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31399 (lpfc_debugfs_max_slow_ring_trc - 1);
31400 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31401 dtp = phba->slow_ring_trc + i;
31402@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31403 uint32_t *ptr;
31404 char buffer[1024];
31405
31406+ pax_track_stack();
31407+
31408 off = 0;
31409 spin_lock_irq(&phba->hbalock);
31410
31411@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31412 !vport || !vport->disc_trc)
31413 return;
31414
31415- index = atomic_inc_return(&vport->disc_trc_cnt) &
31416+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31417 (lpfc_debugfs_max_disc_trc - 1);
31418 dtp = vport->disc_trc + index;
31419 dtp->fmt = fmt;
31420 dtp->data1 = data1;
31421 dtp->data2 = data2;
31422 dtp->data3 = data3;
31423- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31424+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31425 dtp->jif = jiffies;
31426 #endif
31427 return;
31428@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31429 !phba || !phba->slow_ring_trc)
31430 return;
31431
31432- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31433+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31434 (lpfc_debugfs_max_slow_ring_trc - 1);
31435 dtp = phba->slow_ring_trc + index;
31436 dtp->fmt = fmt;
31437 dtp->data1 = data1;
31438 dtp->data2 = data2;
31439 dtp->data3 = data3;
31440- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31441+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31442 dtp->jif = jiffies;
31443 #endif
31444 return;
31445@@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31446 "slow_ring buffer\n");
31447 goto debug_failed;
31448 }
31449- atomic_set(&phba->slow_ring_trc_cnt, 0);
31450+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31451 memset(phba->slow_ring_trc, 0,
31452 (sizeof(struct lpfc_debugfs_trc) *
31453 lpfc_debugfs_max_slow_ring_trc));
31454@@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31455 "buffer\n");
31456 goto debug_failed;
31457 }
31458- atomic_set(&vport->disc_trc_cnt, 0);
31459+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31460
31461 snprintf(name, sizeof(name), "discovery_trace");
31462 vport->debug_disc_trc =
31463diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31464--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31465+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31466@@ -419,7 +419,7 @@ struct lpfc_vport {
31467 struct dentry *debug_nodelist;
31468 struct dentry *vport_debugfs_root;
31469 struct lpfc_debugfs_trc *disc_trc;
31470- atomic_t disc_trc_cnt;
31471+ atomic_unchecked_t disc_trc_cnt;
31472 #endif
31473 uint8_t stat_data_enabled;
31474 uint8_t stat_data_blocked;
31475@@ -785,8 +785,8 @@ struct lpfc_hba {
31476 struct timer_list fabric_block_timer;
31477 unsigned long bit_flags;
31478 #define FABRIC_COMANDS_BLOCKED 0
31479- atomic_t num_rsrc_err;
31480- atomic_t num_cmd_success;
31481+ atomic_unchecked_t num_rsrc_err;
31482+ atomic_unchecked_t num_cmd_success;
31483 unsigned long last_rsrc_error_time;
31484 unsigned long last_ramp_down_time;
31485 unsigned long last_ramp_up_time;
31486@@ -800,7 +800,7 @@ struct lpfc_hba {
31487 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31488 struct dentry *debug_slow_ring_trc;
31489 struct lpfc_debugfs_trc *slow_ring_trc;
31490- atomic_t slow_ring_trc_cnt;
31491+ atomic_unchecked_t slow_ring_trc_cnt;
31492 /* iDiag debugfs sub-directory */
31493 struct dentry *idiag_root;
31494 struct dentry *idiag_pci_cfg;
31495diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31496--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31497+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31498@@ -9535,8 +9535,10 @@ lpfc_init(void)
31499 printk(LPFC_COPYRIGHT "\n");
31500
31501 if (lpfc_enable_npiv) {
31502- lpfc_transport_functions.vport_create = lpfc_vport_create;
31503- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31504+ pax_open_kernel();
31505+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31506+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31507+ pax_close_kernel();
31508 }
31509 lpfc_transport_template =
31510 fc_attach_transport(&lpfc_transport_functions);
31511diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31512--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31513+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31514@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31515 uint32_t evt_posted;
31516
31517 spin_lock_irqsave(&phba->hbalock, flags);
31518- atomic_inc(&phba->num_rsrc_err);
31519+ atomic_inc_unchecked(&phba->num_rsrc_err);
31520 phba->last_rsrc_error_time = jiffies;
31521
31522 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31523@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31524 unsigned long flags;
31525 struct lpfc_hba *phba = vport->phba;
31526 uint32_t evt_posted;
31527- atomic_inc(&phba->num_cmd_success);
31528+ atomic_inc_unchecked(&phba->num_cmd_success);
31529
31530 if (vport->cfg_lun_queue_depth <= queue_depth)
31531 return;
31532@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31533 unsigned long num_rsrc_err, num_cmd_success;
31534 int i;
31535
31536- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31537- num_cmd_success = atomic_read(&phba->num_cmd_success);
31538+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31539+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31540
31541 vports = lpfc_create_vport_work_array(phba);
31542 if (vports != NULL)
31543@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31544 }
31545 }
31546 lpfc_destroy_vport_work_array(phba, vports);
31547- atomic_set(&phba->num_rsrc_err, 0);
31548- atomic_set(&phba->num_cmd_success, 0);
31549+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31550+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31551 }
31552
31553 /**
31554@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31555 }
31556 }
31557 lpfc_destroy_vport_work_array(phba, vports);
31558- atomic_set(&phba->num_rsrc_err, 0);
31559- atomic_set(&phba->num_cmd_success, 0);
31560+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31561+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31562 }
31563
31564 /**
31565diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31566--- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31567+++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31568@@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31569 int rval;
31570 int i;
31571
31572+ pax_track_stack();
31573+
31574 // Allocate memory for the base list of scb for management module.
31575 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31576
31577diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31578--- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31579+++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31580@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31581 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31582 int ret;
31583
31584+ pax_track_stack();
31585+
31586 or = osd_start_request(od, GFP_KERNEL);
31587 if (!or)
31588 return -ENOMEM;
31589diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31590--- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31591+++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31592@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31593 res->scsi_dev = scsi_dev;
31594 scsi_dev->hostdata = res;
31595 res->change_detected = 0;
31596- atomic_set(&res->read_failures, 0);
31597- atomic_set(&res->write_failures, 0);
31598+ atomic_set_unchecked(&res->read_failures, 0);
31599+ atomic_set_unchecked(&res->write_failures, 0);
31600 rc = 0;
31601 }
31602 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31603@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31604
31605 /* If this was a SCSI read/write command keep count of errors */
31606 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31607- atomic_inc(&res->read_failures);
31608+ atomic_inc_unchecked(&res->read_failures);
31609 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31610- atomic_inc(&res->write_failures);
31611+ atomic_inc_unchecked(&res->write_failures);
31612
31613 if (!RES_IS_GSCSI(res->cfg_entry) &&
31614 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31615@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31616 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31617 * hrrq_id assigned here in queuecommand
31618 */
31619- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31620+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31621 pinstance->num_hrrq;
31622 cmd->cmd_done = pmcraid_io_done;
31623
31624@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31625 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31626 * hrrq_id assigned here in queuecommand
31627 */
31628- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31629+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31630 pinstance->num_hrrq;
31631
31632 if (request_size) {
31633@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31634
31635 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31636 /* add resources only after host is added into system */
31637- if (!atomic_read(&pinstance->expose_resources))
31638+ if (!atomic_read_unchecked(&pinstance->expose_resources))
31639 return;
31640
31641 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31642@@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31643 init_waitqueue_head(&pinstance->reset_wait_q);
31644
31645 atomic_set(&pinstance->outstanding_cmds, 0);
31646- atomic_set(&pinstance->last_message_id, 0);
31647- atomic_set(&pinstance->expose_resources, 0);
31648+ atomic_set_unchecked(&pinstance->last_message_id, 0);
31649+ atomic_set_unchecked(&pinstance->expose_resources, 0);
31650
31651 INIT_LIST_HEAD(&pinstance->free_res_q);
31652 INIT_LIST_HEAD(&pinstance->used_res_q);
31653@@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31654 /* Schedule worker thread to handle CCN and take care of adding and
31655 * removing devices to OS
31656 */
31657- atomic_set(&pinstance->expose_resources, 1);
31658+ atomic_set_unchecked(&pinstance->expose_resources, 1);
31659 schedule_work(&pinstance->worker_q);
31660 return rc;
31661
31662diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31663--- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31664+++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31665@@ -750,7 +750,7 @@ struct pmcraid_instance {
31666 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31667
31668 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31669- atomic_t last_message_id;
31670+ atomic_unchecked_t last_message_id;
31671
31672 /* configuration table */
31673 struct pmcraid_config_table *cfg_table;
31674@@ -779,7 +779,7 @@ struct pmcraid_instance {
31675 atomic_t outstanding_cmds;
31676
31677 /* should add/delete resources to mid-layer now ?*/
31678- atomic_t expose_resources;
31679+ atomic_unchecked_t expose_resources;
31680
31681
31682
31683@@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31684 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31685 };
31686 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31687- atomic_t read_failures; /* count of failed READ commands */
31688- atomic_t write_failures; /* count of failed WRITE commands */
31689+ atomic_unchecked_t read_failures; /* count of failed READ commands */
31690+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31691
31692 /* To indicate add/delete/modify during CCN */
31693 u8 change_detected;
31694diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31695--- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31696+++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31697@@ -2236,7 +2236,7 @@ struct isp_operations {
31698 int (*get_flash_version) (struct scsi_qla_host *, void *);
31699 int (*start_scsi) (srb_t *);
31700 int (*abort_isp) (struct scsi_qla_host *);
31701-};
31702+} __no_const;
31703
31704 /* MSI-X Support *************************************************************/
31705
31706diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31707--- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31708+++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31709@@ -256,7 +256,7 @@ struct ddb_entry {
31710 atomic_t retry_relogin_timer; /* Min Time between relogins
31711 * (4000 only) */
31712 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31713- atomic_t relogin_retry_count; /* Num of times relogin has been
31714+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31715 * retried */
31716
31717 uint16_t port;
31718diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31719--- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31720+++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31721@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31722 ddb_entry->fw_ddb_index = fw_ddb_index;
31723 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31724 atomic_set(&ddb_entry->relogin_timer, 0);
31725- atomic_set(&ddb_entry->relogin_retry_count, 0);
31726+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31727 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31728 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31729 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31730@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31731 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31732 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31733 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31734- atomic_set(&ddb_entry->relogin_retry_count, 0);
31735+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31736 atomic_set(&ddb_entry->relogin_timer, 0);
31737 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31738 iscsi_unblock_session(ddb_entry->sess);
31739diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31740--- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31741+++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31742@@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31743 ddb_entry->fw_ddb_device_state ==
31744 DDB_DS_SESSION_FAILED) {
31745 /* Reset retry relogin timer */
31746- atomic_inc(&ddb_entry->relogin_retry_count);
31747+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31748 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31749 " timed out-retrying"
31750 " relogin (%d)\n",
31751 ha->host_no,
31752 ddb_entry->fw_ddb_index,
31753- atomic_read(&ddb_entry->
31754+ atomic_read_unchecked(&ddb_entry->
31755 relogin_retry_count))
31756 );
31757 start_dpc++;
31758diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31759--- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31760+++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31761@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31762 unsigned long timeout;
31763 int rtn = 0;
31764
31765- atomic_inc(&cmd->device->iorequest_cnt);
31766+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31767
31768 /* check if the device is still usable */
31769 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31770diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31771--- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31772+++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31773@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31774 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31775 unsigned char *cmd = (unsigned char *)scp->cmnd;
31776
31777+ pax_track_stack();
31778+
31779 if ((errsts = check_readiness(scp, 1, devip)))
31780 return errsts;
31781 memset(arr, 0, sizeof(arr));
31782@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31783 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31784 unsigned char *cmd = (unsigned char *)scp->cmnd;
31785
31786+ pax_track_stack();
31787+
31788 if ((errsts = check_readiness(scp, 1, devip)))
31789 return errsts;
31790 memset(arr, 0, sizeof(arr));
31791diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31792--- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31793+++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31794@@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31795 shost = sdev->host;
31796 scsi_init_cmd_errh(cmd);
31797 cmd->result = DID_NO_CONNECT << 16;
31798- atomic_inc(&cmd->device->iorequest_cnt);
31799+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31800
31801 /*
31802 * SCSI request completion path will do scsi_device_unbusy(),
31803@@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31804
31805 INIT_LIST_HEAD(&cmd->eh_entry);
31806
31807- atomic_inc(&cmd->device->iodone_cnt);
31808+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
31809 if (cmd->result)
31810- atomic_inc(&cmd->device->ioerr_cnt);
31811+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31812
31813 disposition = scsi_decide_disposition(cmd);
31814 if (disposition != SUCCESS &&
31815diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31816--- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31817+++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31818@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31819 char *buf) \
31820 { \
31821 struct scsi_device *sdev = to_scsi_device(dev); \
31822- unsigned long long count = atomic_read(&sdev->field); \
31823+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
31824 return snprintf(buf, 20, "0x%llx\n", count); \
31825 } \
31826 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31827diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31828--- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31829+++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31830@@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31831 * Netlink Infrastructure
31832 */
31833
31834-static atomic_t fc_event_seq;
31835+static atomic_unchecked_t fc_event_seq;
31836
31837 /**
31838 * fc_get_event_number - Obtain the next sequential FC event number
31839@@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31840 u32
31841 fc_get_event_number(void)
31842 {
31843- return atomic_add_return(1, &fc_event_seq);
31844+ return atomic_add_return_unchecked(1, &fc_event_seq);
31845 }
31846 EXPORT_SYMBOL(fc_get_event_number);
31847
31848@@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31849 {
31850 int error;
31851
31852- atomic_set(&fc_event_seq, 0);
31853+ atomic_set_unchecked(&fc_event_seq, 0);
31854
31855 error = transport_class_register(&fc_host_class);
31856 if (error)
31857@@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31858 char *cp;
31859
31860 *val = simple_strtoul(buf, &cp, 0);
31861- if ((*cp && (*cp != '\n')) || (*val < 0))
31862+ if (*cp && (*cp != '\n'))
31863 return -EINVAL;
31864 /*
31865 * Check for overflow; dev_loss_tmo is u32
31866diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31867--- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31868+++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31869@@ -83,7 +83,7 @@ struct iscsi_internal {
31870 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31871 };
31872
31873-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31874+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31875 static struct workqueue_struct *iscsi_eh_timer_workq;
31876
31877 /*
31878@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31879 int err;
31880
31881 ihost = shost->shost_data;
31882- session->sid = atomic_add_return(1, &iscsi_session_nr);
31883+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31884
31885 if (id == ISCSI_MAX_TARGET) {
31886 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31887@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31888 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31889 ISCSI_TRANSPORT_VERSION);
31890
31891- atomic_set(&iscsi_session_nr, 0);
31892+ atomic_set_unchecked(&iscsi_session_nr, 0);
31893
31894 err = class_register(&iscsi_transport_class);
31895 if (err)
31896diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31897--- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31898+++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31899@@ -33,7 +33,7 @@
31900 #include "scsi_transport_srp_internal.h"
31901
31902 struct srp_host_attrs {
31903- atomic_t next_port_id;
31904+ atomic_unchecked_t next_port_id;
31905 };
31906 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31907
31908@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31909 struct Scsi_Host *shost = dev_to_shost(dev);
31910 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31911
31912- atomic_set(&srp_host->next_port_id, 0);
31913+ atomic_set_unchecked(&srp_host->next_port_id, 0);
31914 return 0;
31915 }
31916
31917@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31918 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31919 rport->roles = ids->roles;
31920
31921- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31922+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31923 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31924
31925 transport_setup_device(&rport->dev);
31926diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31927--- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31928+++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31929@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31930 const struct file_operations * fops;
31931 };
31932
31933-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31934+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31935 {"allow_dio", &adio_fops},
31936 {"debug", &debug_fops},
31937 {"def_reserved_size", &dressz_fops},
31938@@ -2325,7 +2325,7 @@ sg_proc_init(void)
31939 {
31940 int k, mask;
31941 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31942- struct sg_proc_leaf * leaf;
31943+ const struct sg_proc_leaf * leaf;
31944
31945 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31946 if (!sg_proc_sgp)
31947diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31948--- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31949+++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31950@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31951 int do_iounmap = 0;
31952 int do_disable_device = 1;
31953
31954+ pax_track_stack();
31955+
31956 memset(&sym_dev, 0, sizeof(sym_dev));
31957 memset(&nvram, 0, sizeof(nvram));
31958 sym_dev.pdev = pdev;
31959diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
31960--- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
31961+++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
31962@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31963 dma_addr_t base;
31964 unsigned i;
31965
31966+ pax_track_stack();
31967+
31968 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31969 cmd.reqRingNumPages = adapter->req_pages;
31970 cmd.cmpRingNumPages = adapter->cmp_pages;
31971diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
31972--- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
31973+++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
31974@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31975 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31976
31977 /* portable code must never pass more than 32 bytes */
31978-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31979+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
31980
31981 static u8 *buf;
31982
31983diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31984--- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-05-19 00:06:34.000000000 -0400
31985+++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-14 12:12:59.000000000 -0400
31986@@ -384,7 +384,7 @@ static struct ar_cookie s_ar_cookie_mem[
31987 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31988
31989
31990-static struct net_device_ops ar6000_netdev_ops = {
31991+static net_device_ops_no_const ar6000_netdev_ops = {
31992 .ndo_init = NULL,
31993 .ndo_open = ar6000_open,
31994 .ndo_stop = ar6000_close,
31995diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31996--- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-05-19 00:06:34.000000000 -0400
31997+++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-14 09:32:05.000000000 -0400
31998@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31999 typedef struct ar6k_pal_config_s
32000 {
32001 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32002-}ar6k_pal_config_t;
32003+} __no_const ar6k_pal_config_t;
32004
32005 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32006 #endif /* _AR6K_PAL_H_ */
32007diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32008--- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
32009+++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
32010@@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32011 free_netdev(ifp->net);
32012 }
32013 /* Allocate etherdev, including space for private structure */
32014- ifp->net = alloc_etherdev(sizeof(dhd));
32015+ ifp->net = alloc_etherdev(sizeof(*dhd));
32016 if (!ifp->net) {
32017 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32018 ret = -ENOMEM;
32019 }
32020 if (ret == 0) {
32021 strcpy(ifp->net->name, ifp->name);
32022- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32023+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32024 err = dhd_net_attach(&dhd->pub, ifp->idx);
32025 if (err != 0) {
32026 DHD_ERROR(("%s: dhd_net_attach failed, "
32027@@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32028 strcpy(nv_path, nvram_path);
32029
32030 /* Allocate etherdev, including space for private structure */
32031- net = alloc_etherdev(sizeof(dhd));
32032+ net = alloc_etherdev(sizeof(*dhd));
32033 if (!net) {
32034 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32035 goto fail;
32036@@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32037 /*
32038 * Save the dhd_info into the priv
32039 */
32040- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32041+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32042
32043 /* Set network interface name if it was provided as module parameter */
32044 if (iface_name[0]) {
32045@@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32046 /*
32047 * Save the dhd_info into the priv
32048 */
32049- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32050+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32051
32052 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32053 g_bus = bus;
32054diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32055--- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32056+++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32057@@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32058 list = (wl_u32_list_t *) channels;
32059
32060 dwrq->length = sizeof(struct iw_range);
32061- memset(range, 0, sizeof(range));
32062+ memset(range, 0, sizeof(*range));
32063
32064 range->min_nwid = range->max_nwid = 0;
32065
32066diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32067--- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32068+++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32069@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32070 struct net_device_stats *stats = &etdev->net_stats;
32071
32072 if (tcb->flags & fMP_DEST_BROAD)
32073- atomic_inc(&etdev->Stats.brdcstxmt);
32074+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32075 else if (tcb->flags & fMP_DEST_MULTI)
32076- atomic_inc(&etdev->Stats.multixmt);
32077+ atomic_inc_unchecked(&etdev->Stats.multixmt);
32078 else
32079- atomic_inc(&etdev->Stats.unixmt);
32080+ atomic_inc_unchecked(&etdev->Stats.unixmt);
32081
32082 if (tcb->skb) {
32083 stats->tx_bytes += tcb->skb->len;
32084diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32085--- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32086+++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32087@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32088 * operations
32089 */
32090 u32 unircv; /* # multicast packets received */
32091- atomic_t unixmt; /* # multicast packets for Tx */
32092+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32093 u32 multircv; /* # multicast packets received */
32094- atomic_t multixmt; /* # multicast packets for Tx */
32095+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32096 u32 brdcstrcv; /* # broadcast packets received */
32097- atomic_t brdcstxmt; /* # broadcast packets for Tx */
32098+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32099 u32 norcvbuf; /* # Rx packets discarded */
32100 u32 noxmtbuf; /* # Tx packets discarded */
32101
32102diff -urNp linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c
32103--- linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-05-19 00:06:34.000000000 -0400
32104+++ linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-08-14 12:25:25.000000000 -0400
32105@@ -230,8 +230,10 @@ int psb_mmap(struct file *filp, struct v
32106 if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
32107 dev_priv->ttm_vm_ops = (struct vm_operations_struct *)
32108 vma->vm_ops;
32109- psb_ttm_vm_ops = *vma->vm_ops;
32110- psb_ttm_vm_ops.fault = &psb_ttm_fault;
32111+ pax_open_kernel();
32112+ memcpy((void *)&psb_ttm_vm_ops, vma->vm_ops, sizeof(psb_ttm_vm_ops));
32113+ *(void **)&psb_ttm_vm_ops.fault = &psb_ttm_fault;
32114+ pax_close_kernel();
32115 }
32116
32117 vma->vm_ops = &psb_ttm_vm_ops;
32118diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32119--- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32120+++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32121@@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32122 unsigned long flags;
32123 int ret = 0;
32124
32125- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32126- atomic_inc(&vmbus_connection.next_gpadl_handle);
32127+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32128+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32129
32130 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32131 if (ret)
32132diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32133--- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32134+++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32135@@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32136 u64 output_address = (output) ? virt_to_phys(output) : 0;
32137 u32 output_address_hi = output_address >> 32;
32138 u32 output_address_lo = output_address & 0xFFFFFFFF;
32139- volatile void *hypercall_page = hv_context.hypercall_page;
32140+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32141
32142 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32143 control, input, output);
32144diff -urNp linux-2.6.39.4/drivers/staging/hv/hv_mouse.c linux-2.6.39.4/drivers/staging/hv/hv_mouse.c
32145--- linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-05-19 00:06:34.000000000 -0400
32146+++ linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-08-13 20:26:10.000000000 -0400
32147@@ -898,8 +898,10 @@ static void reportdesc_callback(struct h
32148 if (hid_dev) {
32149 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32150
32151- hid_dev->ll_driver->open = mousevsc_hid_open;
32152- hid_dev->ll_driver->close = mousevsc_hid_close;
32153+ pax_open_kernel();
32154+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32155+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32156+ pax_close_kernel();
32157
32158 hid_dev->bus = BUS_VIRTUAL;
32159 hid_dev->vendor = input_device_ctx->device_info.vendor;
32160diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32161--- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32162+++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32163@@ -49,7 +49,7 @@ struct rndis_device {
32164
32165 enum rndis_device_state state;
32166 u32 link_stat;
32167- atomic_t new_req_id;
32168+ atomic_unchecked_t new_req_id;
32169
32170 spinlock_t request_lock;
32171 struct list_head req_list;
32172@@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32173 * template
32174 */
32175 set = &rndis_msg->msg.set_req;
32176- set->req_id = atomic_inc_return(&dev->new_req_id);
32177+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32178
32179 /* Add to the request list */
32180 spin_lock_irqsave(&dev->request_lock, flags);
32181@@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32182
32183 /* Setup the rndis set */
32184 halt = &request->request_msg.msg.halt_req;
32185- halt->req_id = atomic_inc_return(&dev->new_req_id);
32186+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32187
32188 /* Ignore return since this msg is optional. */
32189 rndis_filter_send_request(dev, request);
32190diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32191--- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32192+++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32193@@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32194 {
32195 int ret = 0;
32196
32197- static atomic_t device_num = ATOMIC_INIT(0);
32198+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32199
32200 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32201 child_device_obj);
32202
32203 /* Set the device name. Otherwise, device_register() will fail. */
32204 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32205- atomic_inc_return(&device_num));
32206+ atomic_inc_return_unchecked(&device_num));
32207
32208 /* The new device belongs to this bus */
32209 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32210diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32211--- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32212+++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32213@@ -58,7 +58,7 @@ enum vmbus_connect_state {
32214 struct vmbus_connection {
32215 enum vmbus_connect_state conn_state;
32216
32217- atomic_t next_gpadl_handle;
32218+ atomic_unchecked_t next_gpadl_handle;
32219
32220 /*
32221 * Represents channel interrupts. Each bit position represents a
32222diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32223--- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32224+++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-13 20:14:25.000000000 -0400
32225@@ -86,7 +86,7 @@ struct iio_ring_access_funcs {
32226
32227 int (*is_enabled)(struct iio_ring_buffer *ring);
32228 int (*enable)(struct iio_ring_buffer *ring);
32229-};
32230+} __no_const;
32231
32232 /**
32233 * struct iio_ring_buffer - general ring buffer structure
32234@@ -134,7 +134,7 @@ struct iio_ring_buffer {
32235 struct iio_handler access_handler;
32236 struct iio_event_interface ev_int;
32237 struct iio_shared_ev_pointer shared_ev_pointer;
32238- struct iio_ring_access_funcs access;
32239+ struct iio_ring_access_funcs access;
32240 int (*preenable)(struct iio_dev *);
32241 int (*postenable)(struct iio_dev *);
32242 int (*predisable)(struct iio_dev *);
32243diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32244--- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32245+++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32246@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32247 * since the RX tasklet also increments it.
32248 */
32249 #ifdef CONFIG_64BIT
32250- atomic64_add(rx_status.dropped_packets,
32251- (atomic64_t *)&priv->stats.rx_dropped);
32252+ atomic64_add_unchecked(rx_status.dropped_packets,
32253+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32254 #else
32255- atomic_add(rx_status.dropped_packets,
32256- (atomic_t *)&priv->stats.rx_dropped);
32257+ atomic_add_unchecked(rx_status.dropped_packets,
32258+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32259 #endif
32260 }
32261
32262diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32263--- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32264+++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32265@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32266 /* Increment RX stats for virtual ports */
32267 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32268 #ifdef CONFIG_64BIT
32269- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32270- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32271+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32272+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32273 #else
32274- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32275- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32276+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32277+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32278 #endif
32279 }
32280 netif_receive_skb(skb);
32281@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32282 dev->name);
32283 */
32284 #ifdef CONFIG_64BIT
32285- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32286+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32287 #else
32288- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32289+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32290 #endif
32291 dev_kfree_skb_irq(skb);
32292 }
32293diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32294--- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32295+++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32296@@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32297 mutex_init(&psb->mcache_lock);
32298 psb->mcache_root = RB_ROOT;
32299 psb->mcache_timeout = msecs_to_jiffies(5000);
32300- atomic_long_set(&psb->mcache_gen, 0);
32301+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32302
32303 psb->trans_max_pages = 100;
32304
32305@@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32306 INIT_LIST_HEAD(&psb->crypto_ready_list);
32307 INIT_LIST_HEAD(&psb->crypto_active_list);
32308
32309- atomic_set(&psb->trans_gen, 1);
32310+ atomic_set_unchecked(&psb->trans_gen, 1);
32311 atomic_long_set(&psb->total_inodes, 0);
32312
32313 mutex_init(&psb->state_lock);
32314diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32315--- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32316+++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32317@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32318 m->data = data;
32319 m->start = start;
32320 m->size = size;
32321- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32322+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32323
32324 mutex_lock(&psb->mcache_lock);
32325 err = pohmelfs_mcache_insert(psb, m);
32326diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32327--- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32328+++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32329@@ -571,14 +571,14 @@ struct pohmelfs_config;
32330 struct pohmelfs_sb {
32331 struct rb_root mcache_root;
32332 struct mutex mcache_lock;
32333- atomic_long_t mcache_gen;
32334+ atomic_long_unchecked_t mcache_gen;
32335 unsigned long mcache_timeout;
32336
32337 unsigned int idx;
32338
32339 unsigned int trans_retries;
32340
32341- atomic_t trans_gen;
32342+ atomic_unchecked_t trans_gen;
32343
32344 unsigned int crypto_attached_size;
32345 unsigned int crypto_align_size;
32346diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32347--- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32348+++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32349@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32350 int err;
32351 struct netfs_cmd *cmd = t->iovec.iov_base;
32352
32353- t->gen = atomic_inc_return(&psb->trans_gen);
32354+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32355
32356 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32357 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32358diff -urNp linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h
32359--- linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
32360+++ linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-13 20:31:57.000000000 -0400
32361@@ -83,7 +83,7 @@ struct _io_ops {
32362 u8 *pmem);
32363 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32364 u8 *pmem);
32365-};
32366+} __no_const;
32367
32368 struct io_req {
32369 struct list_head list;
32370diff -urNp linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c
32371--- linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-05-19 00:06:34.000000000 -0400
32372+++ linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-14 12:29:10.000000000 -0400
32373@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32374 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32375
32376 if (rlen)
32377- if (copy_to_user(data, &resp, rlen))
32378+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32379 return -EFAULT;
32380
32381 return 0;
32382diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32383--- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32384+++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32385@@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32386 * re-used for each stats call.
32387 */
32388 static comstats_t stli_comstats;
32389-static combrd_t stli_brdstats;
32390 static struct asystats stli_cdkstats;
32391
32392 /*****************************************************************************/
32393@@ -4003,6 +4002,7 @@ out:
32394
32395 static int stli_getbrdstats(combrd_t __user *bp)
32396 {
32397+ combrd_t stli_brdstats;
32398 struct stlibrd *brdp;
32399 unsigned int i;
32400
32401@@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32402 struct stliport stli_dummyport;
32403 struct stliport *portp;
32404
32405+ pax_track_stack();
32406+
32407 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32408 return -EFAULT;
32409 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32410@@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32411 struct stlibrd stli_dummybrd;
32412 struct stlibrd *brdp;
32413
32414+ pax_track_stack();
32415+
32416 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32417 return -EFAULT;
32418 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32419diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32420--- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32421+++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32422@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32423 struct stlport stl_dummyport;
32424 struct stlport *portp;
32425
32426+ pax_track_stack();
32427+
32428 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32429 return -EFAULT;
32430 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32431diff -urNp linux-2.6.39.4/drivers/staging/usbip/stub_dev.c linux-2.6.39.4/drivers/staging/usbip/stub_dev.c
32432--- linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-05-19 00:06:34.000000000 -0400
32433+++ linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-08-13 20:32:52.000000000 -0400
32434@@ -357,9 +357,11 @@ static struct stub_device *stub_device_a
32435
32436 init_waitqueue_head(&sdev->tx_waitq);
32437
32438- sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32439- sdev->ud.eh_ops.reset = stub_device_reset;
32440- sdev->ud.eh_ops.unusable = stub_device_unusable;
32441+ pax_open_kernel();
32442+ *(void **)&sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32443+ *(void **)&sdev->ud.eh_ops.reset = stub_device_reset;
32444+ *(void **)&sdev->ud.eh_ops.unusable = stub_device_unusable;
32445+ pax_close_kernel();
32446
32447 usbip_start_eh(&sdev->ud);
32448
32449diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32450--- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32451+++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32452@@ -92,7 +92,7 @@ struct vhci_hcd {
32453 unsigned resuming:1;
32454 unsigned long re_timeout;
32455
32456- atomic_t seqnum;
32457+ atomic_unchecked_t seqnum;
32458
32459 /*
32460 * NOTE:
32461diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32462--- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32463+++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-13 20:33:49.000000000 -0400
32464@@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32465 return;
32466 }
32467
32468- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32469+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32470 if (priv->seqnum == 0xffff)
32471 usbip_uinfo("seqnum max\n");
32472
32473@@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32474 return -ENOMEM;
32475 }
32476
32477- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32478+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32479 if (unlink->seqnum == 0xffff)
32480 usbip_uinfo("seqnum max\n");
32481
32482@@ -965,9 +965,11 @@ static void vhci_device_init(struct vhci
32483
32484 init_waitqueue_head(&vdev->waitq_tx);
32485
32486- vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32487- vdev->ud.eh_ops.reset = vhci_device_reset;
32488- vdev->ud.eh_ops.unusable = vhci_device_unusable;
32489+ pax_open_kernel();
32490+ *(void **)&vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32491+ *(void **)&vdev->ud.eh_ops.reset = vhci_device_reset;
32492+ *(void **)&vdev->ud.eh_ops.unusable = vhci_device_unusable;
32493+ pax_close_kernel();
32494
32495 usbip_start_eh(&vdev->ud);
32496 }
32497@@ -992,7 +994,7 @@ static int vhci_start(struct usb_hcd *hc
32498 vdev->rhport = rhport;
32499 }
32500
32501- atomic_set(&vhci->seqnum, 0);
32502+ atomic_set_unchecked(&vhci->seqnum, 0);
32503 spin_lock_init(&vhci->lock);
32504
32505
32506diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32507--- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32508+++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32509@@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32510 usbip_uerr("cannot find a urb of seqnum %u\n",
32511 pdu->base.seqnum);
32512 usbip_uinfo("max seqnum %d\n",
32513- atomic_read(&the_controller->seqnum));
32514+ atomic_read_unchecked(&the_controller->seqnum));
32515 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32516 return;
32517 }
32518diff -urNp linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c
32519--- linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
32520+++ linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-13 20:36:25.000000000 -0400
32521@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32522
32523 struct usbctlx_completor {
32524 int (*complete) (struct usbctlx_completor *);
32525-};
32526+} __no_const;
32527
32528 static int
32529 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32530diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32531--- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32532+++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32533@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32534 char path[ALUA_METADATA_PATH_LEN];
32535 int len;
32536
32537+ pax_track_stack();
32538+
32539 memset(path, 0, ALUA_METADATA_PATH_LEN);
32540
32541 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32542@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32543 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32544 int len;
32545
32546+ pax_track_stack();
32547+
32548 memset(path, 0, ALUA_METADATA_PATH_LEN);
32549 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32550
32551diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32552--- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32553+++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32554@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32555 int length = 0;
32556 unsigned char buf[SE_MODE_PAGE_BUF];
32557
32558+ pax_track_stack();
32559+
32560 memset(buf, 0, SE_MODE_PAGE_BUF);
32561
32562 switch (cdb[2] & 0x3f) {
32563diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32564--- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32565+++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32566@@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32567 ssize_t len = 0;
32568 int reg_count = 0, prf_isid;
32569
32570+ pax_track_stack();
32571+
32572 if (!(su_dev->se_dev_ptr))
32573 return -ENODEV;
32574
32575diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32576--- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32577+++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32578@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32579 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32580 u16 tpgt;
32581
32582+ pax_track_stack();
32583+
32584 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32585 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32586 /*
32587@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32588 ssize_t len = 0;
32589 int reg_count = 0;
32590
32591+ pax_track_stack();
32592+
32593 memset(buf, 0, pr_aptpl_buf_len);
32594 /*
32595 * Called to clear metadata once APTPL has been deactivated.
32596@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32597 char path[512];
32598 int ret;
32599
32600+ pax_track_stack();
32601+
32602 memset(iov, 0, sizeof(struct iovec));
32603 memset(path, 0, 512);
32604
32605diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32606--- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32607+++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32608@@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32609 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32610 T_TASK(cmd)->t_task_cdbs,
32611 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32612- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32613+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32614 atomic_read(&T_TASK(cmd)->t_transport_active),
32615 atomic_read(&T_TASK(cmd)->t_transport_stop),
32616 atomic_read(&T_TASK(cmd)->t_transport_sent));
32617@@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32618 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32619 " task: %p, t_fe_count: %d dev: %p\n", task,
32620 fe_count, dev);
32621- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32622+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32623 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32624 flags);
32625 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32626@@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32627 }
32628 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32629 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32630- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32631+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32632 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32633 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32634
32635diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32636--- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32637+++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32638@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32639
32640 dev->queue_depth = dev_limits->queue_depth;
32641 atomic_set(&dev->depth_left, dev->queue_depth);
32642- atomic_set(&dev->dev_ordered_id, 0);
32643+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
32644
32645 se_dev_set_default_attribs(dev, dev_limits);
32646
32647@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32648 * Used to determine when ORDERED commands should go from
32649 * Dormant to Active status.
32650 */
32651- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32652+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32653 smp_mb__after_atomic_inc();
32654 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32655 cmd->se_ordered_id, cmd->sam_task_attr,
32656@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32657 " t_transport_active: %d t_transport_stop: %d"
32658 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32659 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32660- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32661+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32662 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32663 atomic_read(&T_TASK(cmd)->t_transport_active),
32664 atomic_read(&T_TASK(cmd)->t_transport_stop),
32665@@ -2673,9 +2673,9 @@ check_depth:
32666 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32667 atomic_set(&task->task_active, 1);
32668 atomic_set(&task->task_sent, 1);
32669- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32670+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32671
32672- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32673+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32674 T_TASK(cmd)->t_task_cdbs)
32675 atomic_set(&cmd->transport_sent, 1);
32676
32677@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32678 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32679 }
32680 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32681- atomic_read(&T_TASK(cmd)->t_transport_aborted))
32682+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32683 goto remove;
32684
32685 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32686@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32687 {
32688 int ret = 0;
32689
32690- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32691+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32692 if (!(send_status) ||
32693 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32694 return 1;
32695@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32696 */
32697 if (cmd->data_direction == DMA_TO_DEVICE) {
32698 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32699- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32700+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32701 smp_mb__after_atomic_inc();
32702 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32703 transport_new_cmd_failure(cmd);
32704@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32705 CMD_TFO(cmd)->get_task_tag(cmd),
32706 T_TASK(cmd)->t_task_cdbs,
32707 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32708- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32709+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32710 atomic_read(&T_TASK(cmd)->t_transport_active),
32711 atomic_read(&T_TASK(cmd)->t_transport_stop),
32712 atomic_read(&T_TASK(cmd)->t_transport_sent));
32713diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32714--- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32715+++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32716@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32717 bool mContinue;
32718 char *pIn, *pOut;
32719
32720+ pax_track_stack();
32721+
32722 if (!SCI_Prepare(j))
32723 return 0;
32724
32725diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32726--- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32727+++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32728@@ -83,6 +83,7 @@
32729 #include <asm/hvcserver.h>
32730 #include <asm/uaccess.h>
32731 #include <asm/vio.h>
32732+#include <asm/local.h>
32733
32734 /*
32735 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32736@@ -270,7 +271,7 @@ struct hvcs_struct {
32737 unsigned int index;
32738
32739 struct tty_struct *tty;
32740- int open_count;
32741+ local_t open_count;
32742
32743 /*
32744 * Used to tell the driver kernel_thread what operations need to take
32745@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32746
32747 spin_lock_irqsave(&hvcsd->lock, flags);
32748
32749- if (hvcsd->open_count > 0) {
32750+ if (local_read(&hvcsd->open_count) > 0) {
32751 spin_unlock_irqrestore(&hvcsd->lock, flags);
32752 printk(KERN_INFO "HVCS: vterm state unchanged. "
32753 "The hvcs device node is still in use.\n");
32754@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32755 if ((retval = hvcs_partner_connect(hvcsd)))
32756 goto error_release;
32757
32758- hvcsd->open_count = 1;
32759+ local_set(&hvcsd->open_count, 1);
32760 hvcsd->tty = tty;
32761 tty->driver_data = hvcsd;
32762
32763@@ -1179,7 +1180,7 @@ fast_open:
32764
32765 spin_lock_irqsave(&hvcsd->lock, flags);
32766 kref_get(&hvcsd->kref);
32767- hvcsd->open_count++;
32768+ local_inc(&hvcsd->open_count);
32769 hvcsd->todo_mask |= HVCS_SCHED_READ;
32770 spin_unlock_irqrestore(&hvcsd->lock, flags);
32771
32772@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32773 hvcsd = tty->driver_data;
32774
32775 spin_lock_irqsave(&hvcsd->lock, flags);
32776- if (--hvcsd->open_count == 0) {
32777+ if (local_dec_and_test(&hvcsd->open_count)) {
32778
32779 vio_disable_interrupts(hvcsd->vdev);
32780
32781@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32782 free_irq(irq, hvcsd);
32783 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32784 return;
32785- } else if (hvcsd->open_count < 0) {
32786+ } else if (local_read(&hvcsd->open_count) < 0) {
32787 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32788 " is missmanaged.\n",
32789- hvcsd->vdev->unit_address, hvcsd->open_count);
32790+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32791 }
32792
32793 spin_unlock_irqrestore(&hvcsd->lock, flags);
32794@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32795
32796 spin_lock_irqsave(&hvcsd->lock, flags);
32797 /* Preserve this so that we know how many kref refs to put */
32798- temp_open_count = hvcsd->open_count;
32799+ temp_open_count = local_read(&hvcsd->open_count);
32800
32801 /*
32802 * Don't kref put inside the spinlock because the destruction
32803@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32804 hvcsd->tty->driver_data = NULL;
32805 hvcsd->tty = NULL;
32806
32807- hvcsd->open_count = 0;
32808+ local_set(&hvcsd->open_count, 0);
32809
32810 /* This will drop any buffered data on the floor which is OK in a hangup
32811 * scenario. */
32812@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32813 * the middle of a write operation? This is a crummy place to do this
32814 * but we want to keep it all in the spinlock.
32815 */
32816- if (hvcsd->open_count <= 0) {
32817+ if (local_read(&hvcsd->open_count) <= 0) {
32818 spin_unlock_irqrestore(&hvcsd->lock, flags);
32819 return -ENODEV;
32820 }
32821@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32822 {
32823 struct hvcs_struct *hvcsd = tty->driver_data;
32824
32825- if (!hvcsd || hvcsd->open_count <= 0)
32826+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32827 return 0;
32828
32829 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32830diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32831--- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32832+++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32833@@ -29,6 +29,7 @@
32834 #include <linux/tty_driver.h>
32835 #include <linux/tty_flip.h>
32836 #include <linux/uaccess.h>
32837+#include <asm/local.h>
32838
32839 #include "tty.h"
32840 #include "network.h"
32841@@ -51,7 +52,7 @@ struct ipw_tty {
32842 int tty_type;
32843 struct ipw_network *network;
32844 struct tty_struct *linux_tty;
32845- int open_count;
32846+ local_t open_count;
32847 unsigned int control_lines;
32848 struct mutex ipw_tty_mutex;
32849 int tx_bytes_queued;
32850@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32851 mutex_unlock(&tty->ipw_tty_mutex);
32852 return -ENODEV;
32853 }
32854- if (tty->open_count == 0)
32855+ if (local_read(&tty->open_count) == 0)
32856 tty->tx_bytes_queued = 0;
32857
32858- tty->open_count++;
32859+ local_inc(&tty->open_count);
32860
32861 tty->linux_tty = linux_tty;
32862 linux_tty->driver_data = tty;
32863@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32864
32865 static void do_ipw_close(struct ipw_tty *tty)
32866 {
32867- tty->open_count--;
32868-
32869- if (tty->open_count == 0) {
32870+ if (local_dec_return(&tty->open_count) == 0) {
32871 struct tty_struct *linux_tty = tty->linux_tty;
32872
32873 if (linux_tty != NULL) {
32874@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32875 return;
32876
32877 mutex_lock(&tty->ipw_tty_mutex);
32878- if (tty->open_count == 0) {
32879+ if (local_read(&tty->open_count) == 0) {
32880 mutex_unlock(&tty->ipw_tty_mutex);
32881 return;
32882 }
32883@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32884 return;
32885 }
32886
32887- if (!tty->open_count) {
32888+ if (!local_read(&tty->open_count)) {
32889 mutex_unlock(&tty->ipw_tty_mutex);
32890 return;
32891 }
32892@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32893 return -ENODEV;
32894
32895 mutex_lock(&tty->ipw_tty_mutex);
32896- if (!tty->open_count) {
32897+ if (!local_read(&tty->open_count)) {
32898 mutex_unlock(&tty->ipw_tty_mutex);
32899 return -EINVAL;
32900 }
32901@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32902 if (!tty)
32903 return -ENODEV;
32904
32905- if (!tty->open_count)
32906+ if (!local_read(&tty->open_count))
32907 return -EINVAL;
32908
32909 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32910@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32911 if (!tty)
32912 return 0;
32913
32914- if (!tty->open_count)
32915+ if (!local_read(&tty->open_count))
32916 return 0;
32917
32918 return tty->tx_bytes_queued;
32919@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32920 if (!tty)
32921 return -ENODEV;
32922
32923- if (!tty->open_count)
32924+ if (!local_read(&tty->open_count))
32925 return -EINVAL;
32926
32927 return get_control_lines(tty);
32928@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32929 if (!tty)
32930 return -ENODEV;
32931
32932- if (!tty->open_count)
32933+ if (!local_read(&tty->open_count))
32934 return -EINVAL;
32935
32936 return set_control_lines(tty, set, clear);
32937@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32938 if (!tty)
32939 return -ENODEV;
32940
32941- if (!tty->open_count)
32942+ if (!local_read(&tty->open_count))
32943 return -EINVAL;
32944
32945 /* FIXME: Exactly how is the tty object locked here .. */
32946@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32947 against a parallel ioctl etc */
32948 mutex_lock(&ttyj->ipw_tty_mutex);
32949 }
32950- while (ttyj->open_count)
32951+ while (local_read(&ttyj->open_count))
32952 do_ipw_close(ttyj);
32953 ipwireless_disassociate_network_ttys(network,
32954 ttyj->channel_idx);
32955diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32956--- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32957+++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32958@@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32959 return NULL;
32960 spin_lock_init(&dlci->lock);
32961 dlci->fifo = &dlci->_fifo;
32962- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32963+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32964 kfree(dlci);
32965 return NULL;
32966 }
32967diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32968--- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32969+++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32970@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32971 {
32972 *ops = tty_ldisc_N_TTY;
32973 ops->owner = NULL;
32974- ops->refcount = ops->flags = 0;
32975+ atomic_set(&ops->refcount, 0);
32976+ ops->flags = 0;
32977 }
32978 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32979diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
32980--- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
32981+++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
32982@@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
32983 register_sysctl_table(pty_root_table);
32984
32985 /* Now create the /dev/ptmx special device */
32986+ pax_open_kernel();
32987 tty_default_fops(&ptmx_fops);
32988- ptmx_fops.open = ptmx_open;
32989+ *(void **)&ptmx_fops.open = ptmx_open;
32990+ pax_close_kernel();
32991
32992 cdev_init(&ptmx_cdev, &ptmx_fops);
32993 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32994diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
32995--- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
32996+++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
32997@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32998 struct rocket_ports tmp;
32999 int board;
33000
33001+ pax_track_stack();
33002+
33003 if (!retports)
33004 return -EFAULT;
33005 memset(&tmp, 0, sizeof (tmp));
33006diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
33007--- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
33008+++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
33009@@ -23,8 +23,9 @@
33010 #define MAX_CONFIG_LEN 40
33011
33012 static struct kgdb_io kgdboc_io_ops;
33013+static struct kgdb_io kgdboc_io_ops_console;
33014
33015-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33016+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33017 static int configured = -1;
33018
33019 static char config[MAX_CONFIG_LEN];
33020@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33021 kgdboc_unregister_kbd();
33022 if (configured == 1)
33023 kgdb_unregister_io_module(&kgdboc_io_ops);
33024+ else if (configured == 2)
33025+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
33026 }
33027
33028 static int configure_kgdboc(void)
33029@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33030 int err;
33031 char *cptr = config;
33032 struct console *cons;
33033+ int is_console = 0;
33034
33035 err = kgdboc_option_setup(config);
33036 if (err || !strlen(config) || isspace(config[0]))
33037 goto noconfig;
33038
33039 err = -ENODEV;
33040- kgdboc_io_ops.is_console = 0;
33041 kgdb_tty_driver = NULL;
33042
33043 kgdboc_use_kms = 0;
33044@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33045 int idx;
33046 if (cons->device && cons->device(cons, &idx) == p &&
33047 idx == tty_line) {
33048- kgdboc_io_ops.is_console = 1;
33049+ is_console = 1;
33050 break;
33051 }
33052 cons = cons->next;
33053@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33054 kgdb_tty_line = tty_line;
33055
33056 do_register:
33057- err = kgdb_register_io_module(&kgdboc_io_ops);
33058+ if (is_console) {
33059+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
33060+ configured = 2;
33061+ } else {
33062+ err = kgdb_register_io_module(&kgdboc_io_ops);
33063+ configured = 1;
33064+ }
33065 if (err)
33066 goto noconfig;
33067
33068- configured = 1;
33069-
33070 return 0;
33071
33072 noconfig:
33073@@ -212,7 +219,7 @@ noconfig:
33074 static int __init init_kgdboc(void)
33075 {
33076 /* Already configured? */
33077- if (configured == 1)
33078+ if (configured >= 1)
33079 return 0;
33080
33081 return configure_kgdboc();
33082@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33083 if (config[len - 1] == '\n')
33084 config[len - 1] = '\0';
33085
33086- if (configured == 1)
33087+ if (configured >= 1)
33088 cleanup_kgdboc();
33089
33090 /* Go and configure with the new params. */
33091@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33092 .post_exception = kgdboc_post_exp_handler,
33093 };
33094
33095+static struct kgdb_io kgdboc_io_ops_console = {
33096+ .name = "kgdboc",
33097+ .read_char = kgdboc_get_char,
33098+ .write_char = kgdboc_put_char,
33099+ .pre_exception = kgdboc_pre_exp_handler,
33100+ .post_exception = kgdboc_post_exp_handler,
33101+ .is_console = 1
33102+};
33103+
33104 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33105 /* This is only available if kgdboc is a built in for early debugging */
33106 static int __init kgdboc_early_init(char *opt)
33107diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
33108--- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
33109+++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
33110@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33111 int loop = 1, num, total = 0;
33112 u8 recv_buf[512], *pbuf;
33113
33114+ pax_track_stack();
33115+
33116 pbuf = recv_buf;
33117 do {
33118 num = max3110_read_multi(max, pbuf);
33119diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
33120--- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
33121+++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
33122@@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33123
33124 void tty_default_fops(struct file_operations *fops)
33125 {
33126- *fops = tty_fops;
33127+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33128 }
33129
33130 /*
33131diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
33132--- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
33133+++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
33134@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33135 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33136 struct tty_ldisc_ops *ldo = ld->ops;
33137
33138- ldo->refcount--;
33139+ atomic_dec(&ldo->refcount);
33140 module_put(ldo->owner);
33141 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33142
33143@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33144 spin_lock_irqsave(&tty_ldisc_lock, flags);
33145 tty_ldiscs[disc] = new_ldisc;
33146 new_ldisc->num = disc;
33147- new_ldisc->refcount = 0;
33148+ atomic_set(&new_ldisc->refcount, 0);
33149 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33150
33151 return ret;
33152@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33153 return -EINVAL;
33154
33155 spin_lock_irqsave(&tty_ldisc_lock, flags);
33156- if (tty_ldiscs[disc]->refcount)
33157+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33158 ret = -EBUSY;
33159 else
33160 tty_ldiscs[disc] = NULL;
33161@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33162 if (ldops) {
33163 ret = ERR_PTR(-EAGAIN);
33164 if (try_module_get(ldops->owner)) {
33165- ldops->refcount++;
33166+ atomic_inc(&ldops->refcount);
33167 ret = ldops;
33168 }
33169 }
33170@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33171 unsigned long flags;
33172
33173 spin_lock_irqsave(&tty_ldisc_lock, flags);
33174- ldops->refcount--;
33175+ atomic_dec(&ldops->refcount);
33176 module_put(ldops->owner);
33177 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33178 }
33179diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33180--- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33181+++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33182@@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33183 kbd->kbdmode == VC_OFF) &&
33184 value != KVAL(K_SAK))
33185 return; /* SAK is allowed even in raw mode */
33186+
33187+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33188+ {
33189+ void *func = fn_handler[value];
33190+ if (func == fn_show_state || func == fn_show_ptregs ||
33191+ func == fn_show_mem)
33192+ return;
33193+ }
33194+#endif
33195+
33196 fn_handler[value](vc);
33197 }
33198
33199diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33200--- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33201+++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33202@@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33203
33204 static void notify_write(struct vc_data *vc, unsigned int unicode)
33205 {
33206- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33207+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33208 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33209 }
33210
33211diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33212--- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33213+++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33214@@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33215 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33216 return -EFAULT;
33217
33218- if (!capable(CAP_SYS_TTY_CONFIG))
33219- perm = 0;
33220-
33221 switch (cmd) {
33222 case KDGKBENT:
33223 key_map = key_maps[s];
33224@@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33225 val = (i ? K_HOLE : K_NOSUCHMAP);
33226 return put_user(val, &user_kbe->kb_value);
33227 case KDSKBENT:
33228+ if (!capable(CAP_SYS_TTY_CONFIG))
33229+ perm = 0;
33230+
33231 if (!perm)
33232 return -EPERM;
33233 if (!i && v == K_NOSUCHMAP) {
33234@@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33235 int i, j, k;
33236 int ret;
33237
33238- if (!capable(CAP_SYS_TTY_CONFIG))
33239- perm = 0;
33240-
33241 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33242 if (!kbs) {
33243 ret = -ENOMEM;
33244@@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33245 kfree(kbs);
33246 return ((p && *p) ? -EOVERFLOW : 0);
33247 case KDSKBSENT:
33248+ if (!capable(CAP_SYS_TTY_CONFIG))
33249+ perm = 0;
33250+
33251 if (!perm) {
33252 ret = -EPERM;
33253 goto reterr;
33254diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33255--- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33256+++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33257@@ -25,6 +25,7 @@
33258 #include <linux/kobject.h>
33259 #include <linux/cdev.h>
33260 #include <linux/uio_driver.h>
33261+#include <asm/local.h>
33262
33263 #define UIO_MAX_DEVICES (1U << MINORBITS)
33264
33265@@ -32,10 +33,10 @@ struct uio_device {
33266 struct module *owner;
33267 struct device *dev;
33268 int minor;
33269- atomic_t event;
33270+ atomic_unchecked_t event;
33271 struct fasync_struct *async_queue;
33272 wait_queue_head_t wait;
33273- int vma_count;
33274+ local_t vma_count;
33275 struct uio_info *info;
33276 struct kobject *map_dir;
33277 struct kobject *portio_dir;
33278@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33279 struct device_attribute *attr, char *buf)
33280 {
33281 struct uio_device *idev = dev_get_drvdata(dev);
33282- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33283+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33284 }
33285
33286 static struct device_attribute uio_class_attributes[] = {
33287@@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33288 {
33289 struct uio_device *idev = info->uio_dev;
33290
33291- atomic_inc(&idev->event);
33292+ atomic_inc_unchecked(&idev->event);
33293 wake_up_interruptible(&idev->wait);
33294 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33295 }
33296@@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33297 }
33298
33299 listener->dev = idev;
33300- listener->event_count = atomic_read(&idev->event);
33301+ listener->event_count = atomic_read_unchecked(&idev->event);
33302 filep->private_data = listener;
33303
33304 if (idev->info->open) {
33305@@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33306 return -EIO;
33307
33308 poll_wait(filep, &idev->wait, wait);
33309- if (listener->event_count != atomic_read(&idev->event))
33310+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33311 return POLLIN | POLLRDNORM;
33312 return 0;
33313 }
33314@@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33315 do {
33316 set_current_state(TASK_INTERRUPTIBLE);
33317
33318- event_count = atomic_read(&idev->event);
33319+ event_count = atomic_read_unchecked(&idev->event);
33320 if (event_count != listener->event_count) {
33321 if (copy_to_user(buf, &event_count, count))
33322 retval = -EFAULT;
33323@@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33324 static void uio_vma_open(struct vm_area_struct *vma)
33325 {
33326 struct uio_device *idev = vma->vm_private_data;
33327- idev->vma_count++;
33328+ local_inc(&idev->vma_count);
33329 }
33330
33331 static void uio_vma_close(struct vm_area_struct *vma)
33332 {
33333 struct uio_device *idev = vma->vm_private_data;
33334- idev->vma_count--;
33335+ local_dec(&idev->vma_count);
33336 }
33337
33338 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33339@@ -819,7 +820,7 @@ int __uio_register_device(struct module
33340 idev->owner = owner;
33341 idev->info = info;
33342 init_waitqueue_head(&idev->wait);
33343- atomic_set(&idev->event, 0);
33344+ atomic_set_unchecked(&idev->event, 0);
33345
33346 ret = uio_get_minor(idev);
33347 if (ret)
33348diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33349--- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33350+++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33351@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33352 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33353 if (ret < 2)
33354 return -EINVAL;
33355- if (index < 0 || index > 0x7f)
33356+ if (index > 0x7f)
33357 return -EINVAL;
33358 pos += tmp;
33359
33360diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33361--- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33362+++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33363@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33364 if (printk_ratelimit())
33365 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33366 __func__, vpi, vci);
33367- atomic_inc(&vcc->stats->rx_err);
33368+ atomic_inc_unchecked(&vcc->stats->rx_err);
33369 return;
33370 }
33371
33372@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33373 if (length > ATM_MAX_AAL5_PDU) {
33374 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33375 __func__, length, vcc);
33376- atomic_inc(&vcc->stats->rx_err);
33377+ atomic_inc_unchecked(&vcc->stats->rx_err);
33378 goto out;
33379 }
33380
33381@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33382 if (sarb->len < pdu_length) {
33383 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33384 __func__, pdu_length, sarb->len, vcc);
33385- atomic_inc(&vcc->stats->rx_err);
33386+ atomic_inc_unchecked(&vcc->stats->rx_err);
33387 goto out;
33388 }
33389
33390 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33391 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33392 __func__, vcc);
33393- atomic_inc(&vcc->stats->rx_err);
33394+ atomic_inc_unchecked(&vcc->stats->rx_err);
33395 goto out;
33396 }
33397
33398@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33399 if (printk_ratelimit())
33400 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33401 __func__, length);
33402- atomic_inc(&vcc->stats->rx_drop);
33403+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33404 goto out;
33405 }
33406
33407@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33408
33409 vcc->push(vcc, skb);
33410
33411- atomic_inc(&vcc->stats->rx);
33412+ atomic_inc_unchecked(&vcc->stats->rx);
33413 out:
33414 skb_trim(sarb, 0);
33415 }
33416@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33417 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33418
33419 usbatm_pop(vcc, skb);
33420- atomic_inc(&vcc->stats->tx);
33421+ atomic_inc_unchecked(&vcc->stats->tx);
33422
33423 skb = skb_dequeue(&instance->sndqueue);
33424 }
33425@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33426 if (!left--)
33427 return sprintf(page,
33428 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33429- atomic_read(&atm_dev->stats.aal5.tx),
33430- atomic_read(&atm_dev->stats.aal5.tx_err),
33431- atomic_read(&atm_dev->stats.aal5.rx),
33432- atomic_read(&atm_dev->stats.aal5.rx_err),
33433- atomic_read(&atm_dev->stats.aal5.rx_drop));
33434+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33435+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33436+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33437+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33438+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33439
33440 if (!left--) {
33441 if (instance->disconnected)
33442diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33443--- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33444+++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33445@@ -126,7 +126,7 @@ static const char *format_endpt =
33446 * time it gets called.
33447 */
33448 static struct device_connect_event {
33449- atomic_t count;
33450+ atomic_unchecked_t count;
33451 wait_queue_head_t wait;
33452 } device_event = {
33453 .count = ATOMIC_INIT(1),
33454@@ -164,7 +164,7 @@ static const struct class_info clas_info
33455
33456 void usbfs_conn_disc_event(void)
33457 {
33458- atomic_add(2, &device_event.count);
33459+ atomic_add_unchecked(2, &device_event.count);
33460 wake_up(&device_event.wait);
33461 }
33462
33463@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33464
33465 poll_wait(file, &device_event.wait, wait);
33466
33467- event_count = atomic_read(&device_event.count);
33468+ event_count = atomic_read_unchecked(&device_event.count);
33469 if (file->f_version != event_count) {
33470 file->f_version = event_count;
33471 return POLLIN | POLLRDNORM;
33472diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33473--- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33474+++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33475@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33476 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33477 if (buf) {
33478 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33479- if (len > 0) {
33480- smallbuf = kmalloc(++len, GFP_NOIO);
33481+ if (len++ > 0) {
33482+ smallbuf = kmalloc(len, GFP_NOIO);
33483 if (!smallbuf)
33484 return buf;
33485 memcpy(smallbuf, buf, len);
33486diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33487--- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33488+++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33489@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33490
33491 #ifdef CONFIG_KGDB
33492 static struct kgdb_io kgdbdbgp_io_ops;
33493-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33494+static struct kgdb_io kgdbdbgp_io_ops_console;
33495+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33496 #else
33497 #define dbgp_kgdb_mode (0)
33498 #endif
33499@@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33500 .write_char = kgdbdbgp_write_char,
33501 };
33502
33503+static struct kgdb_io kgdbdbgp_io_ops_console = {
33504+ .name = "kgdbdbgp",
33505+ .read_char = kgdbdbgp_read_char,
33506+ .write_char = kgdbdbgp_write_char,
33507+ .is_console = 1
33508+};
33509+
33510 static int kgdbdbgp_wait_time;
33511
33512 static int __init kgdbdbgp_parse_config(char *str)
33513@@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33514 ptr++;
33515 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33516 }
33517- kgdb_register_io_module(&kgdbdbgp_io_ops);
33518- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33519+ if (early_dbgp_console.index != -1)
33520+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33521+ else
33522+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33523
33524 return 0;
33525 }
33526diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33527--- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33528+++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33529@@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33530 unsigned int num_tests;
33531 int i, ret;
33532
33533+ pax_track_stack();
33534+
33535 num_tests = ARRAY_SIZE(simple_test_vector);
33536 for (i = 0; i < num_tests; i++) {
33537 ret = xhci_test_trb_in_td(xhci,
33538diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33539--- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33540+++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33541@@ -192,7 +192,7 @@ struct wahc {
33542 struct list_head xfer_delayed_list;
33543 spinlock_t xfer_list_lock;
33544 struct work_struct xfer_work;
33545- atomic_t xfer_id_count;
33546+ atomic_unchecked_t xfer_id_count;
33547 };
33548
33549
33550@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33551 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33552 spin_lock_init(&wa->xfer_list_lock);
33553 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33554- atomic_set(&wa->xfer_id_count, 1);
33555+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33556 }
33557
33558 /**
33559diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33560--- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33561+++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33562@@ -294,7 +294,7 @@ out:
33563 */
33564 static void wa_xfer_id_init(struct wa_xfer *xfer)
33565 {
33566- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33567+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33568 }
33569
33570 /*
33571diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33572--- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33573+++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33574@@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33575 return get_user(vq->last_used_idx, &used->idx);
33576 }
33577
33578-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33579+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33580 {
33581 struct file *eventfp, *filep = NULL,
33582 *pollstart = NULL, *pollstop = NULL;
33583diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33584--- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33585+++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33586@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33587 rc = -ENODEV;
33588 goto out;
33589 }
33590- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33591- !info->fbops->fb_setcmap)) {
33592+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33593 rc = -EINVAL;
33594 goto out1;
33595 }
33596diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33597--- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33598+++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33599@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33600 image->dx += image->width + 8;
33601 }
33602 } else if (rotate == FB_ROTATE_UD) {
33603- for (x = 0; x < num && image->dx >= 0; x++) {
33604+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33605 info->fbops->fb_imageblit(info, image);
33606 image->dx -= image->width + 8;
33607 }
33608@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33609 image->dy += image->height + 8;
33610 }
33611 } else if (rotate == FB_ROTATE_CCW) {
33612- for (x = 0; x < num && image->dy >= 0; x++) {
33613+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33614 info->fbops->fb_imageblit(info, image);
33615 image->dy -= image->height + 8;
33616 }
33617@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33618 int flags = info->flags;
33619 int ret = 0;
33620
33621+ pax_track_stack();
33622+
33623 if (var->activate & FB_ACTIVATE_INV_MODE) {
33624 struct fb_videomode mode1, mode2;
33625
33626@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33627 void __user *argp = (void __user *)arg;
33628 long ret = 0;
33629
33630+ pax_track_stack();
33631+
33632 switch (cmd) {
33633 case FBIOGET_VSCREENINFO:
33634 if (!lock_fb_info(info))
33635@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33636 return -EFAULT;
33637 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33638 return -EINVAL;
33639- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33640+ if (con2fb.framebuffer >= FB_MAX)
33641 return -EINVAL;
33642 if (!registered_fb[con2fb.framebuffer])
33643 request_module("fb%d", con2fb.framebuffer);
33644diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33645--- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33646+++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33647@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33648 }
33649 }
33650 printk("ringbuffer lockup!!!\n");
33651+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33652 i810_report_error(mmio);
33653 par->dev_flags |= LOCKUP;
33654 info->pixmap.scan_align = 1;
33655diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33656--- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33657+++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33658@@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33659 dlfb_urb_completion(urb);
33660
33661 error:
33662- atomic_add(bytes_sent, &dev->bytes_sent);
33663- atomic_add(bytes_identical, &dev->bytes_identical);
33664- atomic_add(width*height*2, &dev->bytes_rendered);
33665+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33666+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33667+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33668 end_cycles = get_cycles();
33669- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33670+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33671 >> 10)), /* Kcycles */
33672 &dev->cpu_kcycles_used);
33673
33674@@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33675 dlfb_urb_completion(urb);
33676
33677 error:
33678- atomic_add(bytes_sent, &dev->bytes_sent);
33679- atomic_add(bytes_identical, &dev->bytes_identical);
33680- atomic_add(bytes_rendered, &dev->bytes_rendered);
33681+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33682+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33683+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33684 end_cycles = get_cycles();
33685- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33686+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33687 >> 10)), /* Kcycles */
33688 &dev->cpu_kcycles_used);
33689 }
33690@@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33691 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33692 struct dlfb_data *dev = fb_info->par;
33693 return snprintf(buf, PAGE_SIZE, "%u\n",
33694- atomic_read(&dev->bytes_rendered));
33695+ atomic_read_unchecked(&dev->bytes_rendered));
33696 }
33697
33698 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33699@@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33700 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33701 struct dlfb_data *dev = fb_info->par;
33702 return snprintf(buf, PAGE_SIZE, "%u\n",
33703- atomic_read(&dev->bytes_identical));
33704+ atomic_read_unchecked(&dev->bytes_identical));
33705 }
33706
33707 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33708@@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33709 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33710 struct dlfb_data *dev = fb_info->par;
33711 return snprintf(buf, PAGE_SIZE, "%u\n",
33712- atomic_read(&dev->bytes_sent));
33713+ atomic_read_unchecked(&dev->bytes_sent));
33714 }
33715
33716 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33717@@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33718 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33719 struct dlfb_data *dev = fb_info->par;
33720 return snprintf(buf, PAGE_SIZE, "%u\n",
33721- atomic_read(&dev->cpu_kcycles_used));
33722+ atomic_read_unchecked(&dev->cpu_kcycles_used));
33723 }
33724
33725 static ssize_t edid_show(
33726@@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33728 struct dlfb_data *dev = fb_info->par;
33729
33730- atomic_set(&dev->bytes_rendered, 0);
33731- atomic_set(&dev->bytes_identical, 0);
33732- atomic_set(&dev->bytes_sent, 0);
33733- atomic_set(&dev->cpu_kcycles_used, 0);
33734+ atomic_set_unchecked(&dev->bytes_rendered, 0);
33735+ atomic_set_unchecked(&dev->bytes_identical, 0);
33736+ atomic_set_unchecked(&dev->bytes_sent, 0);
33737+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33738
33739 return count;
33740 }
33741diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33742--- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33743+++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33744@@ -19,6 +19,7 @@
33745 #include <linux/io.h>
33746 #include <linux/mutex.h>
33747 #include <linux/slab.h>
33748+#include <linux/moduleloader.h>
33749 #include <video/edid.h>
33750 #include <video/uvesafb.h>
33751 #ifdef CONFIG_X86
33752@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33753 NULL,
33754 };
33755
33756- return call_usermodehelper(v86d_path, argv, envp, 1);
33757+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33758 }
33759
33760 /*
33761@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33762 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33763 par->pmi_setpal = par->ypan = 0;
33764 } else {
33765+
33766+#ifdef CONFIG_PAX_KERNEXEC
33767+#ifdef CONFIG_MODULES
33768+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33769+#endif
33770+ if (!par->pmi_code) {
33771+ par->pmi_setpal = par->ypan = 0;
33772+ return 0;
33773+ }
33774+#endif
33775+
33776 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33777 + task->t.regs.edi);
33778+
33779+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33780+ pax_open_kernel();
33781+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33782+ pax_close_kernel();
33783+
33784+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33785+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33786+#else
33787 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33788 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33789+#endif
33790+
33791 printk(KERN_INFO "uvesafb: protected mode interface info at "
33792 "%04x:%04x\n",
33793 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33794@@ -1821,6 +1844,11 @@ out:
33795 if (par->vbe_modes)
33796 kfree(par->vbe_modes);
33797
33798+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33799+ if (par->pmi_code)
33800+ module_free_exec(NULL, par->pmi_code);
33801+#endif
33802+
33803 framebuffer_release(info);
33804 return err;
33805 }
33806@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33807 kfree(par->vbe_state_orig);
33808 if (par->vbe_state_saved)
33809 kfree(par->vbe_state_saved);
33810+
33811+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33812+ if (par->pmi_code)
33813+ module_free_exec(NULL, par->pmi_code);
33814+#endif
33815+
33816 }
33817
33818 framebuffer_release(info);
33819diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33820--- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33821+++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33822@@ -9,6 +9,7 @@
33823 */
33824
33825 #include <linux/module.h>
33826+#include <linux/moduleloader.h>
33827 #include <linux/kernel.h>
33828 #include <linux/errno.h>
33829 #include <linux/string.h>
33830@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33831 static int vram_total __initdata; /* Set total amount of memory */
33832 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33833 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33834-static void (*pmi_start)(void) __read_mostly;
33835-static void (*pmi_pal) (void) __read_mostly;
33836+static void (*pmi_start)(void) __read_only;
33837+static void (*pmi_pal) (void) __read_only;
33838 static int depth __read_mostly;
33839 static int vga_compat __read_mostly;
33840 /* --------------------------------------------------------------------- */
33841@@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33842 unsigned int size_vmode;
33843 unsigned int size_remap;
33844 unsigned int size_total;
33845+ void *pmi_code = NULL;
33846
33847 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33848 return -ENODEV;
33849@@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33850 size_remap = size_total;
33851 vesafb_fix.smem_len = size_remap;
33852
33853-#ifndef __i386__
33854- screen_info.vesapm_seg = 0;
33855-#endif
33856-
33857 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33858 printk(KERN_WARNING
33859 "vesafb: cannot reserve video memory at 0x%lx\n",
33860@@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33861 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33862 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33863
33864+#ifdef __i386__
33865+
33866+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33867+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
33868+ if (!pmi_code)
33869+#elif !defined(CONFIG_PAX_KERNEXEC)
33870+ if (0)
33871+#endif
33872+
33873+#endif
33874+ screen_info.vesapm_seg = 0;
33875+
33876 if (screen_info.vesapm_seg) {
33877- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33878- screen_info.vesapm_seg,screen_info.vesapm_off);
33879+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33880+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33881 }
33882
33883 if (screen_info.vesapm_seg < 0xc000)
33884@@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33885
33886 if (ypan || pmi_setpal) {
33887 unsigned short *pmi_base;
33888+
33889 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33890- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33891- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33892+
33893+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33894+ pax_open_kernel();
33895+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33896+#else
33897+ pmi_code = pmi_base;
33898+#endif
33899+
33900+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33901+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33902+
33903+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33904+ pmi_start = ktva_ktla(pmi_start);
33905+ pmi_pal = ktva_ktla(pmi_pal);
33906+ pax_close_kernel();
33907+#endif
33908+
33909 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33910 if (pmi_base[3]) {
33911 printk(KERN_INFO "vesafb: pmi: ports = ");
33912@@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33913 info->node, info->fix.id);
33914 return 0;
33915 err:
33916+
33917+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33918+ module_free_exec(NULL, pmi_code);
33919+#endif
33920+
33921 if (info->screen_base)
33922 iounmap(info->screen_base);
33923 framebuffer_release(info);
33924diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33925--- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33926+++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33927@@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33928 struct sysinfo i;
33929 int idx = 0;
33930
33931+ pax_track_stack();
33932+
33933 all_vm_events(events);
33934 si_meminfo(&i);
33935
33936diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33937--- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33938+++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33939@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33940 void
33941 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33942 {
33943- char *s = nd_get_link(nd);
33944+ const char *s = nd_get_link(nd);
33945
33946 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33947 IS_ERR(s) ? "<error>" : s);
33948diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33949--- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33950+++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33951@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33952 size += sizeof(struct io_event) * nr_events;
33953 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33954
33955- if (nr_pages < 0)
33956+ if (nr_pages <= 0)
33957 return -EINVAL;
33958
33959 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33960@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33961 struct aio_timeout to;
33962 int retry = 0;
33963
33964+ pax_track_stack();
33965+
33966 /* needed to zero any padding within an entry (there shouldn't be
33967 * any, but C is fun!
33968 */
33969@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33970 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33971 {
33972 ssize_t ret;
33973+ struct iovec iovstack;
33974
33975 #ifdef CONFIG_COMPAT
33976 if (compat)
33977 ret = compat_rw_copy_check_uvector(type,
33978 (struct compat_iovec __user *)kiocb->ki_buf,
33979- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33980+ kiocb->ki_nbytes, 1, &iovstack,
33981 &kiocb->ki_iovec);
33982 else
33983 #endif
33984 ret = rw_copy_check_uvector(type,
33985 (struct iovec __user *)kiocb->ki_buf,
33986- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33987+ kiocb->ki_nbytes, 1, &iovstack,
33988 &kiocb->ki_iovec);
33989 if (ret < 0)
33990 goto out;
33991
33992+ if (kiocb->ki_iovec == &iovstack) {
33993+ kiocb->ki_inline_vec = iovstack;
33994+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
33995+ }
33996 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33997 kiocb->ki_cur_seg = 0;
33998 /* ki_nbytes/left now reflect bytes instead of segs */
33999diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
34000--- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
34001+++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
34002@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
34003 unsigned long limit;
34004
34005 limit = rlimit(RLIMIT_FSIZE);
34006+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
34007 if (limit != RLIM_INFINITY && offset > limit)
34008 goto out_sig;
34009 if (offset > inode->i_sb->s_maxbytes)
34010diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
34011--- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
34012+++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
34013@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
34014 {
34015 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
34016 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
34017- char *link = nd_get_link(nd);
34018+ const char *link = nd_get_link(nd);
34019 if (!IS_ERR(link))
34020 kfree(link);
34021 }
34022diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
34023--- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
34024+++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
34025@@ -16,6 +16,7 @@
34026 #include <linux/string.h>
34027 #include <linux/fs.h>
34028 #include <linux/file.h>
34029+#include <linux/security.h>
34030 #include <linux/stat.h>
34031 #include <linux/fcntl.h>
34032 #include <linux/ptrace.h>
34033@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
34034 #endif
34035 # define START_STACK(u) ((void __user *)u.start_stack)
34036
34037+ memset(&dump, 0, sizeof(dump));
34038+
34039 fs = get_fs();
34040 set_fs(KERNEL_DS);
34041 has_dumped = 1;
34042@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
34043
34044 /* If the size of the dump file exceeds the rlimit, then see what would happen
34045 if we wrote the stack, but not the data area. */
34046+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
34047 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
34048 dump.u_dsize = 0;
34049
34050 /* Make sure we have enough room to write the stack and data areas. */
34051+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
34052 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
34053 dump.u_ssize = 0;
34054
34055@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
34056 rlim = rlimit(RLIMIT_DATA);
34057 if (rlim >= RLIM_INFINITY)
34058 rlim = ~0;
34059+
34060+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
34061 if (ex.a_data + ex.a_bss > rlim)
34062 return -ENOMEM;
34063
34064@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
34065 install_exec_creds(bprm);
34066 current->flags &= ~PF_FORKNOEXEC;
34067
34068+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34069+ current->mm->pax_flags = 0UL;
34070+#endif
34071+
34072+#ifdef CONFIG_PAX_PAGEEXEC
34073+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34074+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34075+
34076+#ifdef CONFIG_PAX_EMUTRAMP
34077+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34078+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34079+#endif
34080+
34081+#ifdef CONFIG_PAX_MPROTECT
34082+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34083+ current->mm->pax_flags |= MF_PAX_MPROTECT;
34084+#endif
34085+
34086+ }
34087+#endif
34088+
34089 if (N_MAGIC(ex) == OMAGIC) {
34090 unsigned long text_addr, map_size;
34091 loff_t pos;
34092@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34093
34094 down_write(&current->mm->mmap_sem);
34095 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34096- PROT_READ | PROT_WRITE | PROT_EXEC,
34097+ PROT_READ | PROT_WRITE,
34098 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34099 fd_offset + ex.a_text);
34100 up_write(&current->mm->mmap_sem);
34101diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
34102--- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
34103+++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
34104@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34105 #define elf_core_dump NULL
34106 #endif
34107
34108+#ifdef CONFIG_PAX_MPROTECT
34109+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34110+#endif
34111+
34112 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34113 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34114 #else
34115@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34116 .load_binary = load_elf_binary,
34117 .load_shlib = load_elf_library,
34118 .core_dump = elf_core_dump,
34119+
34120+#ifdef CONFIG_PAX_MPROTECT
34121+ .handle_mprotect= elf_handle_mprotect,
34122+#endif
34123+
34124 .min_coredump = ELF_EXEC_PAGESIZE,
34125 };
34126
34127@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34128
34129 static int set_brk(unsigned long start, unsigned long end)
34130 {
34131+ unsigned long e = end;
34132+
34133 start = ELF_PAGEALIGN(start);
34134 end = ELF_PAGEALIGN(end);
34135 if (end > start) {
34136@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34137 if (BAD_ADDR(addr))
34138 return addr;
34139 }
34140- current->mm->start_brk = current->mm->brk = end;
34141+ current->mm->start_brk = current->mm->brk = e;
34142 return 0;
34143 }
34144
34145@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34146 elf_addr_t __user *u_rand_bytes;
34147 const char *k_platform = ELF_PLATFORM;
34148 const char *k_base_platform = ELF_BASE_PLATFORM;
34149- unsigned char k_rand_bytes[16];
34150+ u32 k_rand_bytes[4];
34151 int items;
34152 elf_addr_t *elf_info;
34153 int ei_index = 0;
34154 const struct cred *cred = current_cred();
34155 struct vm_area_struct *vma;
34156+ unsigned long saved_auxv[AT_VECTOR_SIZE];
34157+
34158+ pax_track_stack();
34159
34160 /*
34161 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34162@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34163 * Generate 16 random bytes for userspace PRNG seeding.
34164 */
34165 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34166- u_rand_bytes = (elf_addr_t __user *)
34167- STACK_ALLOC(p, sizeof(k_rand_bytes));
34168+ srandom32(k_rand_bytes[0] ^ random32());
34169+ srandom32(k_rand_bytes[1] ^ random32());
34170+ srandom32(k_rand_bytes[2] ^ random32());
34171+ srandom32(k_rand_bytes[3] ^ random32());
34172+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
34173+ u_rand_bytes = (elf_addr_t __user *) p;
34174 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34175 return -EFAULT;
34176
34177@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34178 return -EFAULT;
34179 current->mm->env_end = p;
34180
34181+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34182+
34183 /* Put the elf_info on the stack in the right place. */
34184 sp = (elf_addr_t __user *)envp + 1;
34185- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34186+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34187 return -EFAULT;
34188 return 0;
34189 }
34190@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34191 {
34192 struct elf_phdr *elf_phdata;
34193 struct elf_phdr *eppnt;
34194- unsigned long load_addr = 0;
34195+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34196 int load_addr_set = 0;
34197 unsigned long last_bss = 0, elf_bss = 0;
34198- unsigned long error = ~0UL;
34199+ unsigned long error = -EINVAL;
34200 unsigned long total_size;
34201 int retval, i, size;
34202
34203@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34204 goto out_close;
34205 }
34206
34207+#ifdef CONFIG_PAX_SEGMEXEC
34208+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34209+ pax_task_size = SEGMEXEC_TASK_SIZE;
34210+#endif
34211+
34212 eppnt = elf_phdata;
34213 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34214 if (eppnt->p_type == PT_LOAD) {
34215@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34216 k = load_addr + eppnt->p_vaddr;
34217 if (BAD_ADDR(k) ||
34218 eppnt->p_filesz > eppnt->p_memsz ||
34219- eppnt->p_memsz > TASK_SIZE ||
34220- TASK_SIZE - eppnt->p_memsz < k) {
34221+ eppnt->p_memsz > pax_task_size ||
34222+ pax_task_size - eppnt->p_memsz < k) {
34223 error = -ENOMEM;
34224 goto out_close;
34225 }
34226@@ -528,6 +553,193 @@ out:
34227 return error;
34228 }
34229
34230+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34231+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34232+{
34233+ unsigned long pax_flags = 0UL;
34234+
34235+#ifdef CONFIG_PAX_PAGEEXEC
34236+ if (elf_phdata->p_flags & PF_PAGEEXEC)
34237+ pax_flags |= MF_PAX_PAGEEXEC;
34238+#endif
34239+
34240+#ifdef CONFIG_PAX_SEGMEXEC
34241+ if (elf_phdata->p_flags & PF_SEGMEXEC)
34242+ pax_flags |= MF_PAX_SEGMEXEC;
34243+#endif
34244+
34245+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34246+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34247+ if ((__supported_pte_mask & _PAGE_NX))
34248+ pax_flags &= ~MF_PAX_SEGMEXEC;
34249+ else
34250+ pax_flags &= ~MF_PAX_PAGEEXEC;
34251+ }
34252+#endif
34253+
34254+#ifdef CONFIG_PAX_EMUTRAMP
34255+ if (elf_phdata->p_flags & PF_EMUTRAMP)
34256+ pax_flags |= MF_PAX_EMUTRAMP;
34257+#endif
34258+
34259+#ifdef CONFIG_PAX_MPROTECT
34260+ if (elf_phdata->p_flags & PF_MPROTECT)
34261+ pax_flags |= MF_PAX_MPROTECT;
34262+#endif
34263+
34264+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34265+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34266+ pax_flags |= MF_PAX_RANDMMAP;
34267+#endif
34268+
34269+ return pax_flags;
34270+}
34271+#endif
34272+
34273+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34274+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34275+{
34276+ unsigned long pax_flags = 0UL;
34277+
34278+#ifdef CONFIG_PAX_PAGEEXEC
34279+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34280+ pax_flags |= MF_PAX_PAGEEXEC;
34281+#endif
34282+
34283+#ifdef CONFIG_PAX_SEGMEXEC
34284+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34285+ pax_flags |= MF_PAX_SEGMEXEC;
34286+#endif
34287+
34288+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34289+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34290+ if ((__supported_pte_mask & _PAGE_NX))
34291+ pax_flags &= ~MF_PAX_SEGMEXEC;
34292+ else
34293+ pax_flags &= ~MF_PAX_PAGEEXEC;
34294+ }
34295+#endif
34296+
34297+#ifdef CONFIG_PAX_EMUTRAMP
34298+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34299+ pax_flags |= MF_PAX_EMUTRAMP;
34300+#endif
34301+
34302+#ifdef CONFIG_PAX_MPROTECT
34303+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34304+ pax_flags |= MF_PAX_MPROTECT;
34305+#endif
34306+
34307+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34308+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34309+ pax_flags |= MF_PAX_RANDMMAP;
34310+#endif
34311+
34312+ return pax_flags;
34313+}
34314+#endif
34315+
34316+#ifdef CONFIG_PAX_EI_PAX
34317+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34318+{
34319+ unsigned long pax_flags = 0UL;
34320+
34321+#ifdef CONFIG_PAX_PAGEEXEC
34322+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34323+ pax_flags |= MF_PAX_PAGEEXEC;
34324+#endif
34325+
34326+#ifdef CONFIG_PAX_SEGMEXEC
34327+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34328+ pax_flags |= MF_PAX_SEGMEXEC;
34329+#endif
34330+
34331+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34332+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34333+ if ((__supported_pte_mask & _PAGE_NX))
34334+ pax_flags &= ~MF_PAX_SEGMEXEC;
34335+ else
34336+ pax_flags &= ~MF_PAX_PAGEEXEC;
34337+ }
34338+#endif
34339+
34340+#ifdef CONFIG_PAX_EMUTRAMP
34341+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34342+ pax_flags |= MF_PAX_EMUTRAMP;
34343+#endif
34344+
34345+#ifdef CONFIG_PAX_MPROTECT
34346+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34347+ pax_flags |= MF_PAX_MPROTECT;
34348+#endif
34349+
34350+#ifdef CONFIG_PAX_ASLR
34351+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34352+ pax_flags |= MF_PAX_RANDMMAP;
34353+#endif
34354+
34355+ return pax_flags;
34356+}
34357+#endif
34358+
34359+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34360+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34361+{
34362+ unsigned long pax_flags = 0UL;
34363+
34364+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34365+ unsigned long i;
34366+ int found_flags = 0;
34367+#endif
34368+
34369+#ifdef CONFIG_PAX_EI_PAX
34370+ pax_flags = pax_parse_ei_pax(elf_ex);
34371+#endif
34372+
34373+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34374+ for (i = 0UL; i < elf_ex->e_phnum; i++)
34375+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34376+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34377+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34378+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34379+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34380+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34381+ return -EINVAL;
34382+
34383+#ifdef CONFIG_PAX_SOFTMODE
34384+ if (pax_softmode)
34385+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
34386+ else
34387+#endif
34388+
34389+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34390+ found_flags = 1;
34391+ break;
34392+ }
34393+#endif
34394+
34395+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34396+ if (found_flags == 0) {
34397+ struct elf_phdr phdr;
34398+ memset(&phdr, 0, sizeof(phdr));
34399+ phdr.p_flags = PF_NOEMUTRAMP;
34400+#ifdef CONFIG_PAX_SOFTMODE
34401+ if (pax_softmode)
34402+ pax_flags = pax_parse_softmode(&phdr);
34403+ else
34404+#endif
34405+ pax_flags = pax_parse_hardmode(&phdr);
34406+ }
34407+#endif
34408+
34409+ if (0 > pax_check_flags(&pax_flags))
34410+ return -EINVAL;
34411+
34412+ current->mm->pax_flags = pax_flags;
34413+ return 0;
34414+}
34415+#endif
34416+
34417 /*
34418 * These are the functions used to load ELF style executables and shared
34419 * libraries. There is no binary dependent code anywhere else.
34420@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34421 {
34422 unsigned int random_variable = 0;
34423
34424+#ifdef CONFIG_PAX_RANDUSTACK
34425+ if (randomize_va_space)
34426+ return stack_top - current->mm->delta_stack;
34427+#endif
34428+
34429 if ((current->flags & PF_RANDOMIZE) &&
34430 !(current->personality & ADDR_NO_RANDOMIZE)) {
34431 random_variable = get_random_int() & STACK_RND_MASK;
34432@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34433 unsigned long load_addr = 0, load_bias = 0;
34434 int load_addr_set = 0;
34435 char * elf_interpreter = NULL;
34436- unsigned long error;
34437+ unsigned long error = 0;
34438 struct elf_phdr *elf_ppnt, *elf_phdata;
34439 unsigned long elf_bss, elf_brk;
34440 int retval, i;
34441@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34442 unsigned long start_code, end_code, start_data, end_data;
34443 unsigned long reloc_func_desc __maybe_unused = 0;
34444 int executable_stack = EXSTACK_DEFAULT;
34445- unsigned long def_flags = 0;
34446 struct {
34447 struct elfhdr elf_ex;
34448 struct elfhdr interp_elf_ex;
34449 } *loc;
34450+ unsigned long pax_task_size = TASK_SIZE;
34451
34452 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34453 if (!loc) {
34454@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34455
34456 /* OK, This is the point of no return */
34457 current->flags &= ~PF_FORKNOEXEC;
34458- current->mm->def_flags = def_flags;
34459+
34460+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34461+ current->mm->pax_flags = 0UL;
34462+#endif
34463+
34464+#ifdef CONFIG_PAX_DLRESOLVE
34465+ current->mm->call_dl_resolve = 0UL;
34466+#endif
34467+
34468+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34469+ current->mm->call_syscall = 0UL;
34470+#endif
34471+
34472+#ifdef CONFIG_PAX_ASLR
34473+ current->mm->delta_mmap = 0UL;
34474+ current->mm->delta_stack = 0UL;
34475+#endif
34476+
34477+ current->mm->def_flags = 0;
34478+
34479+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34480+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34481+ send_sig(SIGKILL, current, 0);
34482+ goto out_free_dentry;
34483+ }
34484+#endif
34485+
34486+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34487+ pax_set_initial_flags(bprm);
34488+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34489+ if (pax_set_initial_flags_func)
34490+ (pax_set_initial_flags_func)(bprm);
34491+#endif
34492+
34493+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34494+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34495+ current->mm->context.user_cs_limit = PAGE_SIZE;
34496+ current->mm->def_flags |= VM_PAGEEXEC;
34497+ }
34498+#endif
34499+
34500+#ifdef CONFIG_PAX_SEGMEXEC
34501+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34502+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34503+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34504+ pax_task_size = SEGMEXEC_TASK_SIZE;
34505+ current->mm->def_flags |= VM_NOHUGEPAGE;
34506+ }
34507+#endif
34508+
34509+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34510+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34511+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34512+ put_cpu();
34513+ }
34514+#endif
34515
34516 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34517 may depend on the personality. */
34518 SET_PERSONALITY(loc->elf_ex);
34519+
34520+#ifdef CONFIG_PAX_ASLR
34521+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34522+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34523+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34524+ }
34525+#endif
34526+
34527+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34528+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34529+ executable_stack = EXSTACK_DISABLE_X;
34530+ current->personality &= ~READ_IMPLIES_EXEC;
34531+ } else
34532+#endif
34533+
34534 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34535 current->personality |= READ_IMPLIES_EXEC;
34536
34537@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34538 #else
34539 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34540 #endif
34541+
34542+#ifdef CONFIG_PAX_RANDMMAP
34543+ /* PaX: randomize base address at the default exe base if requested */
34544+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34545+#ifdef CONFIG_SPARC64
34546+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34547+#else
34548+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34549+#endif
34550+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34551+ elf_flags |= MAP_FIXED;
34552+ }
34553+#endif
34554+
34555 }
34556
34557 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34558@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34559 * allowed task size. Note that p_filesz must always be
34560 * <= p_memsz so it is only necessary to check p_memsz.
34561 */
34562- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34563- elf_ppnt->p_memsz > TASK_SIZE ||
34564- TASK_SIZE - elf_ppnt->p_memsz < k) {
34565+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34566+ elf_ppnt->p_memsz > pax_task_size ||
34567+ pax_task_size - elf_ppnt->p_memsz < k) {
34568 /* set_brk can never work. Avoid overflows. */
34569 send_sig(SIGKILL, current, 0);
34570 retval = -EINVAL;
34571@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34572 start_data += load_bias;
34573 end_data += load_bias;
34574
34575+#ifdef CONFIG_PAX_RANDMMAP
34576+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34577+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34578+#endif
34579+
34580 /* Calling set_brk effectively mmaps the pages that we need
34581 * for the bss and break sections. We must do this before
34582 * mapping in the interpreter, to make sure it doesn't wind
34583@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34584 goto out_free_dentry;
34585 }
34586 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34587- send_sig(SIGSEGV, current, 0);
34588- retval = -EFAULT; /* Nobody gets to see this, but.. */
34589- goto out_free_dentry;
34590+ /*
34591+ * This bss-zeroing can fail if the ELF
34592+ * file specifies odd protections. So
34593+ * we don't check the return value
34594+ */
34595 }
34596
34597 if (elf_interpreter) {
34598@@ -1090,7 +1398,7 @@ out:
34599 * Decide what to dump of a segment, part, all or none.
34600 */
34601 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34602- unsigned long mm_flags)
34603+ unsigned long mm_flags, long signr)
34604 {
34605 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34606
34607@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34608 if (vma->vm_file == NULL)
34609 return 0;
34610
34611- if (FILTER(MAPPED_PRIVATE))
34612+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34613 goto whole;
34614
34615 /*
34616@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34617 {
34618 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34619 int i = 0;
34620- do
34621+ do {
34622 i += 2;
34623- while (auxv[i - 2] != AT_NULL);
34624+ } while (auxv[i - 2] != AT_NULL);
34625 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34626 }
34627
34628@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34629 }
34630
34631 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34632- unsigned long mm_flags)
34633+ struct coredump_params *cprm)
34634 {
34635 struct vm_area_struct *vma;
34636 size_t size = 0;
34637
34638 for (vma = first_vma(current, gate_vma); vma != NULL;
34639 vma = next_vma(vma, gate_vma))
34640- size += vma_dump_size(vma, mm_flags);
34641+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34642 return size;
34643 }
34644
34645@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34646
34647 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34648
34649- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34650+ offset += elf_core_vma_data_size(gate_vma, cprm);
34651 offset += elf_core_extra_data_size();
34652 e_shoff = offset;
34653
34654@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34655 offset = dataoff;
34656
34657 size += sizeof(*elf);
34658+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34659 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34660 goto end_coredump;
34661
34662 size += sizeof(*phdr4note);
34663+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34664 if (size > cprm->limit
34665 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34666 goto end_coredump;
34667@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34668 phdr.p_offset = offset;
34669 phdr.p_vaddr = vma->vm_start;
34670 phdr.p_paddr = 0;
34671- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34672+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34673 phdr.p_memsz = vma->vm_end - vma->vm_start;
34674 offset += phdr.p_filesz;
34675 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34676@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34677 phdr.p_align = ELF_EXEC_PAGESIZE;
34678
34679 size += sizeof(phdr);
34680+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34681 if (size > cprm->limit
34682 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34683 goto end_coredump;
34684@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34685 unsigned long addr;
34686 unsigned long end;
34687
34688- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34689+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34690
34691 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34692 struct page *page;
34693@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34694 page = get_dump_page(addr);
34695 if (page) {
34696 void *kaddr = kmap(page);
34697+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34698 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34699 !dump_write(cprm->file, kaddr,
34700 PAGE_SIZE);
34701@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34702
34703 if (e_phnum == PN_XNUM) {
34704 size += sizeof(*shdr4extnum);
34705+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34706 if (size > cprm->limit
34707 || !dump_write(cprm->file, shdr4extnum,
34708 sizeof(*shdr4extnum)))
34709@@ -2067,6 +2380,97 @@ out:
34710
34711 #endif /* CONFIG_ELF_CORE */
34712
34713+#ifdef CONFIG_PAX_MPROTECT
34714+/* PaX: non-PIC ELF libraries need relocations on their executable segments
34715+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34716+ * we'll remove VM_MAYWRITE for good on RELRO segments.
34717+ *
34718+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34719+ * basis because we want to allow the common case and not the special ones.
34720+ */
34721+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34722+{
34723+ struct elfhdr elf_h;
34724+ struct elf_phdr elf_p;
34725+ unsigned long i;
34726+ unsigned long oldflags;
34727+ bool is_textrel_rw, is_textrel_rx, is_relro;
34728+
34729+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34730+ return;
34731+
34732+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34733+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34734+
34735+#ifdef CONFIG_PAX_ELFRELOCS
34736+ /* possible TEXTREL */
34737+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34738+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34739+#else
34740+ is_textrel_rw = false;
34741+ is_textrel_rx = false;
34742+#endif
34743+
34744+ /* possible RELRO */
34745+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34746+
34747+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34748+ return;
34749+
34750+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34751+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34752+
34753+#ifdef CONFIG_PAX_ETEXECRELOCS
34754+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34755+#else
34756+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34757+#endif
34758+
34759+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34760+ !elf_check_arch(&elf_h) ||
34761+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34762+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34763+ return;
34764+
34765+ for (i = 0UL; i < elf_h.e_phnum; i++) {
34766+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34767+ return;
34768+ switch (elf_p.p_type) {
34769+ case PT_DYNAMIC:
34770+ if (!is_textrel_rw && !is_textrel_rx)
34771+ continue;
34772+ i = 0UL;
34773+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34774+ elf_dyn dyn;
34775+
34776+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34777+ return;
34778+ if (dyn.d_tag == DT_NULL)
34779+ return;
34780+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34781+ gr_log_textrel(vma);
34782+ if (is_textrel_rw)
34783+ vma->vm_flags |= VM_MAYWRITE;
34784+ else
34785+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34786+ vma->vm_flags &= ~VM_MAYWRITE;
34787+ return;
34788+ }
34789+ i++;
34790+ }
34791+ return;
34792+
34793+ case PT_GNU_RELRO:
34794+ if (!is_relro)
34795+ continue;
34796+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34797+ vma->vm_flags &= ~VM_MAYWRITE;
34798+ return;
34799+ }
34800+ }
34801+}
34802+#endif
34803+
34804 static int __init init_elf_binfmt(void)
34805 {
34806 return register_binfmt(&elf_format);
34807diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34808--- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34809+++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34810@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34811 realdatastart = (unsigned long) -ENOMEM;
34812 printk("Unable to allocate RAM for process data, errno %d\n",
34813 (int)-realdatastart);
34814+ down_write(&current->mm->mmap_sem);
34815 do_munmap(current->mm, textpos, text_len);
34816+ up_write(&current->mm->mmap_sem);
34817 ret = realdatastart;
34818 goto err;
34819 }
34820@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34821 }
34822 if (IS_ERR_VALUE(result)) {
34823 printk("Unable to read data+bss, errno %d\n", (int)-result);
34824+ down_write(&current->mm->mmap_sem);
34825 do_munmap(current->mm, textpos, text_len);
34826 do_munmap(current->mm, realdatastart, len);
34827+ up_write(&current->mm->mmap_sem);
34828 ret = result;
34829 goto err;
34830 }
34831@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34832 }
34833 if (IS_ERR_VALUE(result)) {
34834 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34835+ down_write(&current->mm->mmap_sem);
34836 do_munmap(current->mm, textpos, text_len + data_len + extra +
34837 MAX_SHARED_LIBS * sizeof(unsigned long));
34838+ up_write(&current->mm->mmap_sem);
34839 ret = result;
34840 goto err;
34841 }
34842diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34843--- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34844+++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34845@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34846 const int read = bio_data_dir(bio) == READ;
34847 struct bio_map_data *bmd = bio->bi_private;
34848 int i;
34849- char *p = bmd->sgvecs[0].iov_base;
34850+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
34851
34852 __bio_for_each_segment(bvec, bio, i, 0) {
34853 char *addr = page_address(bvec->bv_page);
34854diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34855--- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34856+++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34857@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34858 else if (bdev->bd_contains == bdev)
34859 return true; /* is a whole device which isn't held */
34860
34861- else if (whole->bd_holder == bd_may_claim)
34862+ else if (whole->bd_holder == (void *)bd_may_claim)
34863 return true; /* is a partition of a device that is being partitioned */
34864 else if (whole->bd_holder != NULL)
34865 return false; /* is a partition of a held device */
34866diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34867--- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34868+++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34869@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34870 free_extent_buffer(buf);
34871 add_root_to_dirty_list(root);
34872 } else {
34873- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34874- parent_start = parent->start;
34875- else
34876+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34877+ if (parent)
34878+ parent_start = parent->start;
34879+ else
34880+ parent_start = 0;
34881+ } else
34882 parent_start = 0;
34883
34884 WARN_ON(trans->transid != btrfs_header_generation(parent));
34885@@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34886
34887 ret = 0;
34888 if (slot == 0) {
34889- struct btrfs_disk_key disk_key;
34890 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34891 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34892 }
34893diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34894--- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34895+++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34896@@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34897 while(1) {
34898 if (entry->bytes < bytes ||
34899 (!entry->bitmap && entry->offset < min_start)) {
34900- struct rb_node *node;
34901-
34902 node = rb_next(&entry->offset_index);
34903 if (!node)
34904 break;
34905@@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34906 cluster, entry, bytes,
34907 min_start);
34908 if (ret == 0) {
34909- struct rb_node *node;
34910 node = rb_next(&entry->offset_index);
34911 if (!node)
34912 break;
34913diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34914--- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34915+++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34916@@ -6947,7 +6947,7 @@ fail:
34917 return -ENOMEM;
34918 }
34919
34920-static int btrfs_getattr(struct vfsmount *mnt,
34921+int btrfs_getattr(struct vfsmount *mnt,
34922 struct dentry *dentry, struct kstat *stat)
34923 {
34924 struct inode *inode = dentry->d_inode;
34925@@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34926 return 0;
34927 }
34928
34929+EXPORT_SYMBOL(btrfs_getattr);
34930+
34931+dev_t get_btrfs_dev_from_inode(struct inode *inode)
34932+{
34933+ return BTRFS_I(inode)->root->anon_super.s_dev;
34934+}
34935+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34936+
34937 /*
34938 * If a file is moved, it will inherit the cow and compression flags of the new
34939 * directory.
34940diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34941--- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34942+++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34943@@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34944 for (i = 0; i < num_types; i++) {
34945 struct btrfs_space_info *tmp;
34946
34947+ /* Don't copy in more than we allocated */
34948 if (!slot_count)
34949 break;
34950
34951+ slot_count--;
34952+
34953 info = NULL;
34954 rcu_read_lock();
34955 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34956@@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34957 memcpy(dest, &space, sizeof(space));
34958 dest++;
34959 space_args.total_spaces++;
34960- slot_count--;
34961 }
34962- if (!slot_count)
34963- break;
34964 }
34965 up_read(&info->groups_sem);
34966 }
34967diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34968--- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34969+++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34970@@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34971 }
34972 spin_unlock(&rc->reloc_root_tree.lock);
34973
34974- BUG_ON((struct btrfs_root *)node->data != root);
34975+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
34976
34977 if (!del) {
34978 spin_lock(&rc->reloc_root_tree.lock);
34979diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
34980--- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
34981+++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
34982@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34983 args);
34984
34985 /* start by checking things over */
34986- ASSERT(cache->fstop_percent >= 0 &&
34987- cache->fstop_percent < cache->fcull_percent &&
34988+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
34989 cache->fcull_percent < cache->frun_percent &&
34990 cache->frun_percent < 100);
34991
34992- ASSERT(cache->bstop_percent >= 0 &&
34993- cache->bstop_percent < cache->bcull_percent &&
34994+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
34995 cache->bcull_percent < cache->brun_percent &&
34996 cache->brun_percent < 100);
34997
34998diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
34999--- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
35000+++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
35001@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
35002 if (n > buflen)
35003 return -EMSGSIZE;
35004
35005- if (copy_to_user(_buffer, buffer, n) != 0)
35006+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
35007 return -EFAULT;
35008
35009 return n;
35010@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
35011 if (test_bit(CACHEFILES_DEAD, &cache->flags))
35012 return -EIO;
35013
35014- if (datalen < 0 || datalen > PAGE_SIZE - 1)
35015+ if (datalen > PAGE_SIZE - 1)
35016 return -EOPNOTSUPP;
35017
35018 /* drag the command string into the kernel so we can parse it */
35019@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
35020 if (args[0] != '%' || args[1] != '\0')
35021 return -EINVAL;
35022
35023- if (fstop < 0 || fstop >= cache->fcull_percent)
35024+ if (fstop >= cache->fcull_percent)
35025 return cachefiles_daemon_range_error(cache, args);
35026
35027 cache->fstop_percent = fstop;
35028@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
35029 if (args[0] != '%' || args[1] != '\0')
35030 return -EINVAL;
35031
35032- if (bstop < 0 || bstop >= cache->bcull_percent)
35033+ if (bstop >= cache->bcull_percent)
35034 return cachefiles_daemon_range_error(cache, args);
35035
35036 cache->bstop_percent = bstop;
35037diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
35038--- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
35039+++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
35040@@ -57,7 +57,7 @@ struct cachefiles_cache {
35041 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
35042 struct rb_root active_nodes; /* active nodes (can't be culled) */
35043 rwlock_t active_lock; /* lock for active_nodes */
35044- atomic_t gravecounter; /* graveyard uniquifier */
35045+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
35046 unsigned frun_percent; /* when to stop culling (% files) */
35047 unsigned fcull_percent; /* when to start culling (% files) */
35048 unsigned fstop_percent; /* when to stop allocating (% files) */
35049@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
35050 * proc.c
35051 */
35052 #ifdef CONFIG_CACHEFILES_HISTOGRAM
35053-extern atomic_t cachefiles_lookup_histogram[HZ];
35054-extern atomic_t cachefiles_mkdir_histogram[HZ];
35055-extern atomic_t cachefiles_create_histogram[HZ];
35056+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35057+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35058+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
35059
35060 extern int __init cachefiles_proc_init(void);
35061 extern void cachefiles_proc_cleanup(void);
35062 static inline
35063-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
35064+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
35065 {
35066 unsigned long jif = jiffies - start_jif;
35067 if (jif >= HZ)
35068 jif = HZ - 1;
35069- atomic_inc(&histogram[jif]);
35070+ atomic_inc_unchecked(&histogram[jif]);
35071 }
35072
35073 #else
35074diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
35075--- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
35076+++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
35077@@ -318,7 +318,7 @@ try_again:
35078 /* first step is to make up a grave dentry in the graveyard */
35079 sprintf(nbuffer, "%08x%08x",
35080 (uint32_t) get_seconds(),
35081- (uint32_t) atomic_inc_return(&cache->gravecounter));
35082+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
35083
35084 /* do the multiway lock magic */
35085 trap = lock_rename(cache->graveyard, dir);
35086diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
35087--- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
35088+++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
35089@@ -14,9 +14,9 @@
35090 #include <linux/seq_file.h>
35091 #include "internal.h"
35092
35093-atomic_t cachefiles_lookup_histogram[HZ];
35094-atomic_t cachefiles_mkdir_histogram[HZ];
35095-atomic_t cachefiles_create_histogram[HZ];
35096+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35097+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35098+atomic_unchecked_t cachefiles_create_histogram[HZ];
35099
35100 /*
35101 * display the latency histogram
35102@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35103 return 0;
35104 default:
35105 index = (unsigned long) v - 3;
35106- x = atomic_read(&cachefiles_lookup_histogram[index]);
35107- y = atomic_read(&cachefiles_mkdir_histogram[index]);
35108- z = atomic_read(&cachefiles_create_histogram[index]);
35109+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35110+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35111+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35112 if (x == 0 && y == 0 && z == 0)
35113 return 0;
35114
35115diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
35116--- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
35117+++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
35118@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35119 old_fs = get_fs();
35120 set_fs(KERNEL_DS);
35121 ret = file->f_op->write(
35122- file, (const void __user *) data, len, &pos);
35123+ file, (__force const void __user *) data, len, &pos);
35124 set_fs(old_fs);
35125 kunmap(page);
35126 if (ret != len)
35127diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
35128--- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
35129+++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
35130@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35131 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35132 struct ceph_mds_client *mdsc = fsc->mdsc;
35133 unsigned frag = fpos_frag(filp->f_pos);
35134- int off = fpos_off(filp->f_pos);
35135+ unsigned int off = fpos_off(filp->f_pos);
35136 int err;
35137 u32 ftype;
35138 struct ceph_mds_reply_info_parsed *rinfo;
35139@@ -360,7 +360,7 @@ more:
35140 rinfo = &fi->last_readdir->r_reply_info;
35141 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35142 rinfo->dir_nr, off, fi->offset);
35143- while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35144+ while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35145 u64 pos = ceph_make_fpos(frag, off);
35146 struct ceph_mds_reply_inode *in =
35147 rinfo->dir_in[off - fi->offset].in;
35148diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35149--- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35150+++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35151@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35152 tcon = list_entry(tmp3,
35153 struct cifsTconInfo,
35154 tcon_list);
35155- atomic_set(&tcon->num_smbs_sent, 0);
35156- atomic_set(&tcon->num_writes, 0);
35157- atomic_set(&tcon->num_reads, 0);
35158- atomic_set(&tcon->num_oplock_brks, 0);
35159- atomic_set(&tcon->num_opens, 0);
35160- atomic_set(&tcon->num_posixopens, 0);
35161- atomic_set(&tcon->num_posixmkdirs, 0);
35162- atomic_set(&tcon->num_closes, 0);
35163- atomic_set(&tcon->num_deletes, 0);
35164- atomic_set(&tcon->num_mkdirs, 0);
35165- atomic_set(&tcon->num_rmdirs, 0);
35166- atomic_set(&tcon->num_renames, 0);
35167- atomic_set(&tcon->num_t2renames, 0);
35168- atomic_set(&tcon->num_ffirst, 0);
35169- atomic_set(&tcon->num_fnext, 0);
35170- atomic_set(&tcon->num_fclose, 0);
35171- atomic_set(&tcon->num_hardlinks, 0);
35172- atomic_set(&tcon->num_symlinks, 0);
35173- atomic_set(&tcon->num_locks, 0);
35174+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35175+ atomic_set_unchecked(&tcon->num_writes, 0);
35176+ atomic_set_unchecked(&tcon->num_reads, 0);
35177+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35178+ atomic_set_unchecked(&tcon->num_opens, 0);
35179+ atomic_set_unchecked(&tcon->num_posixopens, 0);
35180+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35181+ atomic_set_unchecked(&tcon->num_closes, 0);
35182+ atomic_set_unchecked(&tcon->num_deletes, 0);
35183+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
35184+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
35185+ atomic_set_unchecked(&tcon->num_renames, 0);
35186+ atomic_set_unchecked(&tcon->num_t2renames, 0);
35187+ atomic_set_unchecked(&tcon->num_ffirst, 0);
35188+ atomic_set_unchecked(&tcon->num_fnext, 0);
35189+ atomic_set_unchecked(&tcon->num_fclose, 0);
35190+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
35191+ atomic_set_unchecked(&tcon->num_symlinks, 0);
35192+ atomic_set_unchecked(&tcon->num_locks, 0);
35193 }
35194 }
35195 }
35196@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35197 if (tcon->need_reconnect)
35198 seq_puts(m, "\tDISCONNECTED ");
35199 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35200- atomic_read(&tcon->num_smbs_sent),
35201- atomic_read(&tcon->num_oplock_brks));
35202+ atomic_read_unchecked(&tcon->num_smbs_sent),
35203+ atomic_read_unchecked(&tcon->num_oplock_brks));
35204 seq_printf(m, "\nReads: %d Bytes: %lld",
35205- atomic_read(&tcon->num_reads),
35206+ atomic_read_unchecked(&tcon->num_reads),
35207 (long long)(tcon->bytes_read));
35208 seq_printf(m, "\nWrites: %d Bytes: %lld",
35209- atomic_read(&tcon->num_writes),
35210+ atomic_read_unchecked(&tcon->num_writes),
35211 (long long)(tcon->bytes_written));
35212 seq_printf(m, "\nFlushes: %d",
35213- atomic_read(&tcon->num_flushes));
35214+ atomic_read_unchecked(&tcon->num_flushes));
35215 seq_printf(m, "\nLocks: %d HardLinks: %d "
35216 "Symlinks: %d",
35217- atomic_read(&tcon->num_locks),
35218- atomic_read(&tcon->num_hardlinks),
35219- atomic_read(&tcon->num_symlinks));
35220+ atomic_read_unchecked(&tcon->num_locks),
35221+ atomic_read_unchecked(&tcon->num_hardlinks),
35222+ atomic_read_unchecked(&tcon->num_symlinks));
35223 seq_printf(m, "\nOpens: %d Closes: %d "
35224 "Deletes: %d",
35225- atomic_read(&tcon->num_opens),
35226- atomic_read(&tcon->num_closes),
35227- atomic_read(&tcon->num_deletes));
35228+ atomic_read_unchecked(&tcon->num_opens),
35229+ atomic_read_unchecked(&tcon->num_closes),
35230+ atomic_read_unchecked(&tcon->num_deletes));
35231 seq_printf(m, "\nPosix Opens: %d "
35232 "Posix Mkdirs: %d",
35233- atomic_read(&tcon->num_posixopens),
35234- atomic_read(&tcon->num_posixmkdirs));
35235+ atomic_read_unchecked(&tcon->num_posixopens),
35236+ atomic_read_unchecked(&tcon->num_posixmkdirs));
35237 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35238- atomic_read(&tcon->num_mkdirs),
35239- atomic_read(&tcon->num_rmdirs));
35240+ atomic_read_unchecked(&tcon->num_mkdirs),
35241+ atomic_read_unchecked(&tcon->num_rmdirs));
35242 seq_printf(m, "\nRenames: %d T2 Renames %d",
35243- atomic_read(&tcon->num_renames),
35244- atomic_read(&tcon->num_t2renames));
35245+ atomic_read_unchecked(&tcon->num_renames),
35246+ atomic_read_unchecked(&tcon->num_t2renames));
35247 seq_printf(m, "\nFindFirst: %d FNext %d "
35248 "FClose %d",
35249- atomic_read(&tcon->num_ffirst),
35250- atomic_read(&tcon->num_fnext),
35251- atomic_read(&tcon->num_fclose));
35252+ atomic_read_unchecked(&tcon->num_ffirst),
35253+ atomic_read_unchecked(&tcon->num_fnext),
35254+ atomic_read_unchecked(&tcon->num_fclose));
35255 }
35256 }
35257 }
35258diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35259--- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35260+++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35261@@ -305,28 +305,28 @@ struct cifsTconInfo {
35262 __u16 Flags; /* optional support bits */
35263 enum statusEnum tidStatus;
35264 #ifdef CONFIG_CIFS_STATS
35265- atomic_t num_smbs_sent;
35266- atomic_t num_writes;
35267- atomic_t num_reads;
35268- atomic_t num_flushes;
35269- atomic_t num_oplock_brks;
35270- atomic_t num_opens;
35271- atomic_t num_closes;
35272- atomic_t num_deletes;
35273- atomic_t num_mkdirs;
35274- atomic_t num_posixopens;
35275- atomic_t num_posixmkdirs;
35276- atomic_t num_rmdirs;
35277- atomic_t num_renames;
35278- atomic_t num_t2renames;
35279- atomic_t num_ffirst;
35280- atomic_t num_fnext;
35281- atomic_t num_fclose;
35282- atomic_t num_hardlinks;
35283- atomic_t num_symlinks;
35284- atomic_t num_locks;
35285- atomic_t num_acl_get;
35286- atomic_t num_acl_set;
35287+ atomic_unchecked_t num_smbs_sent;
35288+ atomic_unchecked_t num_writes;
35289+ atomic_unchecked_t num_reads;
35290+ atomic_unchecked_t num_flushes;
35291+ atomic_unchecked_t num_oplock_brks;
35292+ atomic_unchecked_t num_opens;
35293+ atomic_unchecked_t num_closes;
35294+ atomic_unchecked_t num_deletes;
35295+ atomic_unchecked_t num_mkdirs;
35296+ atomic_unchecked_t num_posixopens;
35297+ atomic_unchecked_t num_posixmkdirs;
35298+ atomic_unchecked_t num_rmdirs;
35299+ atomic_unchecked_t num_renames;
35300+ atomic_unchecked_t num_t2renames;
35301+ atomic_unchecked_t num_ffirst;
35302+ atomic_unchecked_t num_fnext;
35303+ atomic_unchecked_t num_fclose;
35304+ atomic_unchecked_t num_hardlinks;
35305+ atomic_unchecked_t num_symlinks;
35306+ atomic_unchecked_t num_locks;
35307+ atomic_unchecked_t num_acl_get;
35308+ atomic_unchecked_t num_acl_set;
35309 #ifdef CONFIG_CIFS_STATS2
35310 unsigned long long time_writes;
35311 unsigned long long time_reads;
35312@@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35313 }
35314
35315 #ifdef CONFIG_CIFS_STATS
35316-#define cifs_stats_inc atomic_inc
35317+#define cifs_stats_inc atomic_inc_unchecked
35318
35319 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35320 unsigned int bytes)
35321diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35322--- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35323+++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35324@@ -577,7 +577,7 @@ symlink_exit:
35325
35326 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35327 {
35328- char *p = nd_get_link(nd);
35329+ const char *p = nd_get_link(nd);
35330 if (!IS_ERR(p))
35331 kfree(p);
35332 }
35333diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35334--- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35335+++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35336@@ -24,7 +24,7 @@
35337 #include "coda_linux.h"
35338 #include "coda_cache.h"
35339
35340-static atomic_t permission_epoch = ATOMIC_INIT(0);
35341+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35342
35343 /* replace or extend an acl cache hit */
35344 void coda_cache_enter(struct inode *inode, int mask)
35345@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35346 struct coda_inode_info *cii = ITOC(inode);
35347
35348 spin_lock(&cii->c_lock);
35349- cii->c_cached_epoch = atomic_read(&permission_epoch);
35350+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35351 if (cii->c_uid != current_fsuid()) {
35352 cii->c_uid = current_fsuid();
35353 cii->c_cached_perm = mask;
35354@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35355 {
35356 struct coda_inode_info *cii = ITOC(inode);
35357 spin_lock(&cii->c_lock);
35358- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35359+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35360 spin_unlock(&cii->c_lock);
35361 }
35362
35363 /* remove all acl caches */
35364 void coda_cache_clear_all(struct super_block *sb)
35365 {
35366- atomic_inc(&permission_epoch);
35367+ atomic_inc_unchecked(&permission_epoch);
35368 }
35369
35370
35371@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35372 spin_lock(&cii->c_lock);
35373 hit = (mask & cii->c_cached_perm) == mask &&
35374 cii->c_uid == current_fsuid() &&
35375- cii->c_cached_epoch == atomic_read(&permission_epoch);
35376+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35377 spin_unlock(&cii->c_lock);
35378
35379 return hit;
35380diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35381--- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35382+++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35383@@ -30,11 +30,13 @@
35384 #undef elf_phdr
35385 #undef elf_shdr
35386 #undef elf_note
35387+#undef elf_dyn
35388 #undef elf_addr_t
35389 #define elfhdr elf32_hdr
35390 #define elf_phdr elf32_phdr
35391 #define elf_shdr elf32_shdr
35392 #define elf_note elf32_note
35393+#define elf_dyn Elf32_Dyn
35394 #define elf_addr_t Elf32_Addr
35395
35396 /*
35397diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35398--- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35399+++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35400@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35401 goto out;
35402
35403 ret = -EINVAL;
35404- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35405+ if (nr_segs > UIO_MAXIOV)
35406 goto out;
35407 if (nr_segs > fast_segs) {
35408 ret = -ENOMEM;
35409@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35410
35411 struct compat_readdir_callback {
35412 struct compat_old_linux_dirent __user *dirent;
35413+ struct file * file;
35414 int result;
35415 };
35416
35417@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35418 buf->result = -EOVERFLOW;
35419 return -EOVERFLOW;
35420 }
35421+
35422+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35423+ return 0;
35424+
35425 buf->result++;
35426 dirent = buf->dirent;
35427 if (!access_ok(VERIFY_WRITE, dirent,
35428@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35429
35430 buf.result = 0;
35431 buf.dirent = dirent;
35432+ buf.file = file;
35433
35434 error = vfs_readdir(file, compat_fillonedir, &buf);
35435 if (buf.result)
35436@@ -917,6 +923,7 @@ struct compat_linux_dirent {
35437 struct compat_getdents_callback {
35438 struct compat_linux_dirent __user *current_dir;
35439 struct compat_linux_dirent __user *previous;
35440+ struct file * file;
35441 int count;
35442 int error;
35443 };
35444@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35445 buf->error = -EOVERFLOW;
35446 return -EOVERFLOW;
35447 }
35448+
35449+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35450+ return 0;
35451+
35452 dirent = buf->previous;
35453 if (dirent) {
35454 if (__put_user(offset, &dirent->d_off))
35455@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35456 buf.previous = NULL;
35457 buf.count = count;
35458 buf.error = 0;
35459+ buf.file = file;
35460
35461 error = vfs_readdir(file, compat_filldir, &buf);
35462 if (error >= 0)
35463@@ -1006,6 +1018,7 @@ out:
35464 struct compat_getdents_callback64 {
35465 struct linux_dirent64 __user *current_dir;
35466 struct linux_dirent64 __user *previous;
35467+ struct file * file;
35468 int count;
35469 int error;
35470 };
35471@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35472 buf->error = -EINVAL; /* only used if we fail.. */
35473 if (reclen > buf->count)
35474 return -EINVAL;
35475+
35476+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35477+ return 0;
35478+
35479 dirent = buf->previous;
35480
35481 if (dirent) {
35482@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35483 buf.previous = NULL;
35484 buf.count = count;
35485 buf.error = 0;
35486+ buf.file = file;
35487
35488 error = vfs_readdir(file, compat_filldir64, &buf);
35489 if (error >= 0)
35490@@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35491 compat_uptr_t __user *envp,
35492 struct pt_regs * regs)
35493 {
35494+#ifdef CONFIG_GRKERNSEC
35495+ struct file *old_exec_file;
35496+ struct acl_subject_label *old_acl;
35497+ struct rlimit old_rlim[RLIM_NLIMITS];
35498+#endif
35499 struct linux_binprm *bprm;
35500 struct file *file;
35501 struct files_struct *displaced;
35502@@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35503 bprm->filename = filename;
35504 bprm->interp = filename;
35505
35506+ if (gr_process_user_ban()) {
35507+ retval = -EPERM;
35508+ goto out_file;
35509+ }
35510+
35511+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35512+ retval = -EAGAIN;
35513+ if (gr_handle_nproc())
35514+ goto out_file;
35515+ retval = -EACCES;
35516+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35517+ goto out_file;
35518+
35519 retval = bprm_mm_init(bprm);
35520 if (retval)
35521 goto out_file;
35522@@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35523 if (retval < 0)
35524 goto out;
35525
35526+ if (!gr_tpe_allow(file)) {
35527+ retval = -EACCES;
35528+ goto out;
35529+ }
35530+
35531+ if (gr_check_crash_exec(file)) {
35532+ retval = -EACCES;
35533+ goto out;
35534+ }
35535+
35536+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35537+
35538+ gr_handle_exec_args_compat(bprm, argv);
35539+
35540+#ifdef CONFIG_GRKERNSEC
35541+ old_acl = current->acl;
35542+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35543+ old_exec_file = current->exec_file;
35544+ get_file(file);
35545+ current->exec_file = file;
35546+#endif
35547+
35548+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35549+ bprm->unsafe & LSM_UNSAFE_SHARE);
35550+ if (retval < 0)
35551+ goto out_fail;
35552+
35553 retval = search_binary_handler(bprm, regs);
35554 if (retval < 0)
35555- goto out;
35556+ goto out_fail;
35557+#ifdef CONFIG_GRKERNSEC
35558+ if (old_exec_file)
35559+ fput(old_exec_file);
35560+#endif
35561
35562 /* execve succeeded */
35563 current->fs->in_exec = 0;
35564@@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35565 put_files_struct(displaced);
35566 return retval;
35567
35568+out_fail:
35569+#ifdef CONFIG_GRKERNSEC
35570+ current->acl = old_acl;
35571+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35572+ fput(current->exec_file);
35573+ current->exec_file = old_exec_file;
35574+#endif
35575+
35576 out:
35577 if (bprm->mm) {
35578 acct_arg_size(bprm, 0);
35579@@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35580 struct fdtable *fdt;
35581 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35582
35583+ pax_track_stack();
35584+
35585 if (n < 0)
35586 goto out_nofds;
35587
35588diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35589--- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35590+++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35591@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35592
35593 err = get_user(palp, &up->palette);
35594 err |= get_user(length, &up->length);
35595+ if (err)
35596+ return -EFAULT;
35597
35598 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35599 err = put_user(compat_ptr(palp), &up_native->palette);
35600@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35601 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35602 {
35603 unsigned int a, b;
35604- a = *(unsigned int *)p;
35605- b = *(unsigned int *)q;
35606+ a = *(const unsigned int *)p;
35607+ b = *(const unsigned int *)q;
35608 if (a > b)
35609 return 1;
35610 if (a < b)
35611diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35612--- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35613+++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35614@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35615 }
35616 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35617 struct configfs_dirent *next;
35618- const char * name;
35619+ const unsigned char * name;
35620+ char d_name[sizeof(next->s_dentry->d_iname)];
35621 int len;
35622 struct inode *inode = NULL;
35623
35624@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35625 continue;
35626
35627 name = configfs_get_name(next);
35628- len = strlen(name);
35629+ if (next->s_dentry && name == next->s_dentry->d_iname) {
35630+ len = next->s_dentry->d_name.len;
35631+ memcpy(d_name, name, len);
35632+ name = d_name;
35633+ } else
35634+ len = strlen(name);
35635
35636 /*
35637 * We'll have a dentry and an inode for
35638diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35639--- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35640+++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35641@@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35642 mempages -= reserve;
35643
35644 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35645- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35646+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35647
35648 dcache_init();
35649 inode_init();
35650diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35651--- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35652+++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35653@@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35654 old_fs = get_fs();
35655 set_fs(get_ds());
35656 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35657- (char __user *)lower_buf,
35658+ (__force char __user *)lower_buf,
35659 lower_bufsiz);
35660 set_fs(old_fs);
35661 if (rc < 0)
35662@@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35663 }
35664 old_fs = get_fs();
35665 set_fs(get_ds());
35666- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35667+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35668 set_fs(old_fs);
35669 if (rc < 0) {
35670 kfree(buf);
35671@@ -684,7 +684,7 @@ out:
35672 static void
35673 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35674 {
35675- char *buf = nd_get_link(nd);
35676+ const char *buf = nd_get_link(nd);
35677 if (!IS_ERR(buf)) {
35678 /* Free the char* */
35679 kfree(buf);
35680diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35681--- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35682+++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35683@@ -328,7 +328,7 @@ check_list:
35684 goto out_unlock_msg_ctx;
35685 i = 5;
35686 if (msg_ctx->msg) {
35687- if (copy_to_user(&buf[i], packet_length, packet_length_size))
35688+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35689 goto out_unlock_msg_ctx;
35690 i += packet_length_size;
35691 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35692diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35693--- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35694+++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35695@@ -55,12 +55,24 @@
35696 #include <linux/fs_struct.h>
35697 #include <linux/pipe_fs_i.h>
35698 #include <linux/oom.h>
35699+#include <linux/random.h>
35700+#include <linux/seq_file.h>
35701+
35702+#ifdef CONFIG_PAX_REFCOUNT
35703+#include <linux/kallsyms.h>
35704+#include <linux/kdebug.h>
35705+#endif
35706
35707 #include <asm/uaccess.h>
35708 #include <asm/mmu_context.h>
35709 #include <asm/tlb.h>
35710 #include "internal.h"
35711
35712+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35713+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35714+EXPORT_SYMBOL(pax_set_initial_flags_func);
35715+#endif
35716+
35717 int core_uses_pid;
35718 char core_pattern[CORENAME_MAX_SIZE] = "core";
35719 unsigned int core_pipe_limit;
35720@@ -70,7 +82,7 @@ struct core_name {
35721 char *corename;
35722 int used, size;
35723 };
35724-static atomic_t call_count = ATOMIC_INIT(1);
35725+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35726
35727 /* The maximal length of core_pattern is also specified in sysctl.c */
35728
35729@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35730 char *tmp = getname(library);
35731 int error = PTR_ERR(tmp);
35732 static const struct open_flags uselib_flags = {
35733- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35734+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35735 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35736 .intent = LOOKUP_OPEN
35737 };
35738@@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35739 int write)
35740 {
35741 struct page *page;
35742- int ret;
35743
35744-#ifdef CONFIG_STACK_GROWSUP
35745- if (write) {
35746- ret = expand_stack_downwards(bprm->vma, pos);
35747- if (ret < 0)
35748- return NULL;
35749- }
35750-#endif
35751- ret = get_user_pages(current, bprm->mm, pos,
35752- 1, write, 1, &page, NULL);
35753- if (ret <= 0)
35754+ if (0 > expand_stack_downwards(bprm->vma, pos))
35755+ return NULL;
35756+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35757 return NULL;
35758
35759 if (write) {
35760@@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35761 vma->vm_end = STACK_TOP_MAX;
35762 vma->vm_start = vma->vm_end - PAGE_SIZE;
35763 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35764+
35765+#ifdef CONFIG_PAX_SEGMEXEC
35766+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35767+#endif
35768+
35769 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35770 INIT_LIST_HEAD(&vma->anon_vma_chain);
35771
35772@@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35773 mm->stack_vm = mm->total_vm = 1;
35774 up_write(&mm->mmap_sem);
35775 bprm->p = vma->vm_end - sizeof(void *);
35776+
35777+#ifdef CONFIG_PAX_RANDUSTACK
35778+ if (randomize_va_space)
35779+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35780+#endif
35781+
35782 return 0;
35783 err:
35784 up_write(&mm->mmap_sem);
35785@@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35786 int r;
35787 mm_segment_t oldfs = get_fs();
35788 set_fs(KERNEL_DS);
35789- r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35790+ r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35791 set_fs(oldfs);
35792 return r;
35793 }
35794@@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35795 unsigned long new_end = old_end - shift;
35796 struct mmu_gather *tlb;
35797
35798- BUG_ON(new_start > new_end);
35799+ if (new_start >= new_end || new_start < mmap_min_addr)
35800+ return -ENOMEM;
35801
35802 /*
35803 * ensure there are no vmas between where we want to go
35804@@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35805 if (vma != find_vma(mm, new_start))
35806 return -EFAULT;
35807
35808+#ifdef CONFIG_PAX_SEGMEXEC
35809+ BUG_ON(pax_find_mirror_vma(vma));
35810+#endif
35811+
35812 /*
35813 * cover the whole range: [new_start, old_end)
35814 */
35815@@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35816 stack_top = arch_align_stack(stack_top);
35817 stack_top = PAGE_ALIGN(stack_top);
35818
35819- if (unlikely(stack_top < mmap_min_addr) ||
35820- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35821- return -ENOMEM;
35822-
35823 stack_shift = vma->vm_end - stack_top;
35824
35825 bprm->p -= stack_shift;
35826@@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35827 bprm->exec -= stack_shift;
35828
35829 down_write(&mm->mmap_sem);
35830+
35831+ /* Move stack pages down in memory. */
35832+ if (stack_shift) {
35833+ ret = shift_arg_pages(vma, stack_shift);
35834+ if (ret)
35835+ goto out_unlock;
35836+ }
35837+
35838 vm_flags = VM_STACK_FLAGS;
35839
35840+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35841+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35842+ vm_flags &= ~VM_EXEC;
35843+
35844+#ifdef CONFIG_PAX_MPROTECT
35845+ if (mm->pax_flags & MF_PAX_MPROTECT)
35846+ vm_flags &= ~VM_MAYEXEC;
35847+#endif
35848+
35849+ }
35850+#endif
35851+
35852 /*
35853 * Adjust stack execute permissions; explicitly enable for
35854 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35855@@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35856 goto out_unlock;
35857 BUG_ON(prev != vma);
35858
35859- /* Move stack pages down in memory. */
35860- if (stack_shift) {
35861- ret = shift_arg_pages(vma, stack_shift);
35862- if (ret)
35863- goto out_unlock;
35864- }
35865-
35866 /* mprotect_fixup is overkill to remove the temporary stack flags */
35867 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35868
35869@@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35870 struct file *file;
35871 int err;
35872 static const struct open_flags open_exec_flags = {
35873- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35874+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35875 .acc_mode = MAY_EXEC | MAY_OPEN,
35876 .intent = LOOKUP_OPEN
35877 };
35878@@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35879 old_fs = get_fs();
35880 set_fs(get_ds());
35881 /* The cast to a user pointer is valid due to the set_fs() */
35882- result = vfs_read(file, (void __user *)addr, count, &pos);
35883+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
35884 set_fs(old_fs);
35885 return result;
35886 }
35887@@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35888 }
35889 rcu_read_unlock();
35890
35891- if (p->fs->users > n_fs) {
35892+ if (atomic_read(&p->fs->users) > n_fs) {
35893 bprm->unsafe |= LSM_UNSAFE_SHARE;
35894 } else {
35895 res = -EAGAIN;
35896@@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35897 const char __user *const __user *envp,
35898 struct pt_regs * regs)
35899 {
35900+#ifdef CONFIG_GRKERNSEC
35901+ struct file *old_exec_file;
35902+ struct acl_subject_label *old_acl;
35903+ struct rlimit old_rlim[RLIM_NLIMITS];
35904+#endif
35905 struct linux_binprm *bprm;
35906 struct file *file;
35907 struct files_struct *displaced;
35908@@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35909 bprm->filename = filename;
35910 bprm->interp = filename;
35911
35912+ if (gr_process_user_ban()) {
35913+ retval = -EPERM;
35914+ goto out_file;
35915+ }
35916+
35917+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35918+
35919+ if (gr_handle_nproc()) {
35920+ retval = -EAGAIN;
35921+ goto out_file;
35922+ }
35923+
35924+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35925+ retval = -EACCES;
35926+ goto out_file;
35927+ }
35928+
35929 retval = bprm_mm_init(bprm);
35930 if (retval)
35931 goto out_file;
35932@@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35933 if (retval < 0)
35934 goto out;
35935
35936+ if (!gr_tpe_allow(file)) {
35937+ retval = -EACCES;
35938+ goto out;
35939+ }
35940+
35941+ if (gr_check_crash_exec(file)) {
35942+ retval = -EACCES;
35943+ goto out;
35944+ }
35945+
35946+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35947+
35948+ gr_handle_exec_args(bprm, argv);
35949+
35950+#ifdef CONFIG_GRKERNSEC
35951+ old_acl = current->acl;
35952+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35953+ old_exec_file = current->exec_file;
35954+ get_file(file);
35955+ current->exec_file = file;
35956+#endif
35957+
35958+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35959+ bprm->unsafe & LSM_UNSAFE_SHARE);
35960+ if (retval < 0)
35961+ goto out_fail;
35962+
35963 retval = search_binary_handler(bprm,regs);
35964 if (retval < 0)
35965- goto out;
35966+ goto out_fail;
35967+#ifdef CONFIG_GRKERNSEC
35968+ if (old_exec_file)
35969+ fput(old_exec_file);
35970+#endif
35971
35972 /* execve succeeded */
35973 current->fs->in_exec = 0;
35974@@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35975 put_files_struct(displaced);
35976 return retval;
35977
35978+out_fail:
35979+#ifdef CONFIG_GRKERNSEC
35980+ current->acl = old_acl;
35981+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35982+ fput(current->exec_file);
35983+ current->exec_file = old_exec_file;
35984+#endif
35985+
35986 out:
35987 if (bprm->mm) {
35988 acct_arg_size(bprm, 0);
35989@@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
35990 {
35991 char *old_corename = cn->corename;
35992
35993- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35994+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35995 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35996
35997 if (!cn->corename) {
35998@@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
35999 int pid_in_pattern = 0;
36000 int err = 0;
36001
36002- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
36003+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
36004 cn->corename = kmalloc(cn->size, GFP_KERNEL);
36005 cn->used = 0;
36006
36007@@ -1645,6 +1735,219 @@ out:
36008 return ispipe;
36009 }
36010
36011+int pax_check_flags(unsigned long *flags)
36012+{
36013+ int retval = 0;
36014+
36015+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
36016+ if (*flags & MF_PAX_SEGMEXEC)
36017+ {
36018+ *flags &= ~MF_PAX_SEGMEXEC;
36019+ retval = -EINVAL;
36020+ }
36021+#endif
36022+
36023+ if ((*flags & MF_PAX_PAGEEXEC)
36024+
36025+#ifdef CONFIG_PAX_PAGEEXEC
36026+ && (*flags & MF_PAX_SEGMEXEC)
36027+#endif
36028+
36029+ )
36030+ {
36031+ *flags &= ~MF_PAX_PAGEEXEC;
36032+ retval = -EINVAL;
36033+ }
36034+
36035+ if ((*flags & MF_PAX_MPROTECT)
36036+
36037+#ifdef CONFIG_PAX_MPROTECT
36038+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36039+#endif
36040+
36041+ )
36042+ {
36043+ *flags &= ~MF_PAX_MPROTECT;
36044+ retval = -EINVAL;
36045+ }
36046+
36047+ if ((*flags & MF_PAX_EMUTRAMP)
36048+
36049+#ifdef CONFIG_PAX_EMUTRAMP
36050+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36051+#endif
36052+
36053+ )
36054+ {
36055+ *flags &= ~MF_PAX_EMUTRAMP;
36056+ retval = -EINVAL;
36057+ }
36058+
36059+ return retval;
36060+}
36061+
36062+EXPORT_SYMBOL(pax_check_flags);
36063+
36064+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36065+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
36066+{
36067+ struct task_struct *tsk = current;
36068+ struct mm_struct *mm = current->mm;
36069+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
36070+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
36071+ char *path_exec = NULL;
36072+ char *path_fault = NULL;
36073+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
36074+
36075+ if (buffer_exec && buffer_fault) {
36076+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36077+
36078+ down_read(&mm->mmap_sem);
36079+ vma = mm->mmap;
36080+ while (vma && (!vma_exec || !vma_fault)) {
36081+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36082+ vma_exec = vma;
36083+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36084+ vma_fault = vma;
36085+ vma = vma->vm_next;
36086+ }
36087+ if (vma_exec) {
36088+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36089+ if (IS_ERR(path_exec))
36090+ path_exec = "<path too long>";
36091+ else {
36092+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36093+ if (path_exec) {
36094+ *path_exec = 0;
36095+ path_exec = buffer_exec;
36096+ } else
36097+ path_exec = "<path too long>";
36098+ }
36099+ }
36100+ if (vma_fault) {
36101+ start = vma_fault->vm_start;
36102+ end = vma_fault->vm_end;
36103+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36104+ if (vma_fault->vm_file) {
36105+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36106+ if (IS_ERR(path_fault))
36107+ path_fault = "<path too long>";
36108+ else {
36109+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36110+ if (path_fault) {
36111+ *path_fault = 0;
36112+ path_fault = buffer_fault;
36113+ } else
36114+ path_fault = "<path too long>";
36115+ }
36116+ } else
36117+ path_fault = "<anonymous mapping>";
36118+ }
36119+ up_read(&mm->mmap_sem);
36120+ }
36121+ if (tsk->signal->curr_ip)
36122+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36123+ else
36124+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36125+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36126+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36127+ task_uid(tsk), task_euid(tsk), pc, sp);
36128+ free_page((unsigned long)buffer_exec);
36129+ free_page((unsigned long)buffer_fault);
36130+ pax_report_insns(pc, sp);
36131+ do_coredump(SIGKILL, SIGKILL, regs);
36132+}
36133+#endif
36134+
36135+#ifdef CONFIG_PAX_REFCOUNT
36136+void pax_report_refcount_overflow(struct pt_regs *regs)
36137+{
36138+ if (current->signal->curr_ip)
36139+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36140+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36141+ else
36142+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36143+ current->comm, task_pid_nr(current), current_uid(), current_euid());
36144+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36145+ show_regs(regs);
36146+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36147+}
36148+#endif
36149+
36150+#ifdef CONFIG_PAX_USERCOPY
36151+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36152+int object_is_on_stack(const void *obj, unsigned long len)
36153+{
36154+ const void * const stack = task_stack_page(current);
36155+ const void * const stackend = stack + THREAD_SIZE;
36156+
36157+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36158+ const void *frame = NULL;
36159+ const void *oldframe;
36160+#endif
36161+
36162+ if (obj + len < obj)
36163+ return -1;
36164+
36165+ if (obj + len <= stack || stackend <= obj)
36166+ return 0;
36167+
36168+ if (obj < stack || stackend < obj + len)
36169+ return -1;
36170+
36171+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36172+ oldframe = __builtin_frame_address(1);
36173+ if (oldframe)
36174+ frame = __builtin_frame_address(2);
36175+ /*
36176+ low ----------------------------------------------> high
36177+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
36178+ ^----------------^
36179+ allow copies only within here
36180+ */
36181+ while (stack <= frame && frame < stackend) {
36182+ /* if obj + len extends past the last frame, this
36183+ check won't pass and the next frame will be 0,
36184+ causing us to bail out and correctly report
36185+ the copy as invalid
36186+ */
36187+ if (obj + len <= frame)
36188+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36189+ oldframe = frame;
36190+ frame = *(const void * const *)frame;
36191+ }
36192+ return -1;
36193+#else
36194+ return 1;
36195+#endif
36196+}
36197+
36198+
36199+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36200+{
36201+ if (current->signal->curr_ip)
36202+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36203+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36204+ else
36205+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36206+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36207+ dump_stack();
36208+ gr_handle_kernel_exploit();
36209+ do_group_exit(SIGKILL);
36210+}
36211+#endif
36212+
36213+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36214+void pax_track_stack(void)
36215+{
36216+ unsigned long sp = (unsigned long)&sp;
36217+ if (sp < current_thread_info()->lowest_stack &&
36218+ sp > (unsigned long)task_stack_page(current))
36219+ current_thread_info()->lowest_stack = sp;
36220+}
36221+EXPORT_SYMBOL(pax_track_stack);
36222+#endif
36223+
36224 static int zap_process(struct task_struct *start, int exit_code)
36225 {
36226 struct task_struct *t;
36227@@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36228 pipe = file->f_path.dentry->d_inode->i_pipe;
36229
36230 pipe_lock(pipe);
36231- pipe->readers++;
36232- pipe->writers--;
36233+ atomic_inc(&pipe->readers);
36234+ atomic_dec(&pipe->writers);
36235
36236- while ((pipe->readers > 1) && (!signal_pending(current))) {
36237+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36238 wake_up_interruptible_sync(&pipe->wait);
36239 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36240 pipe_wait(pipe);
36241 }
36242
36243- pipe->readers--;
36244- pipe->writers++;
36245+ atomic_dec(&pipe->readers);
36246+ atomic_inc(&pipe->writers);
36247 pipe_unlock(pipe);
36248
36249 }
36250@@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36251 int retval = 0;
36252 int flag = 0;
36253 int ispipe;
36254- static atomic_t core_dump_count = ATOMIC_INIT(0);
36255+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36256 struct coredump_params cprm = {
36257 .signr = signr,
36258 .regs = regs,
36259@@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36260
36261 audit_core_dumps(signr);
36262
36263+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36264+ gr_handle_brute_attach(current, cprm.mm_flags);
36265+
36266 binfmt = mm->binfmt;
36267 if (!binfmt || !binfmt->core_dump)
36268 goto fail;
36269@@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36270 goto fail_corename;
36271 }
36272
36273+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36274+
36275 if (ispipe) {
36276 int dump_count;
36277 char **helper_argv;
36278@@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36279 }
36280 cprm.limit = RLIM_INFINITY;
36281
36282- dump_count = atomic_inc_return(&core_dump_count);
36283+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
36284 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36285 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36286 task_tgid_vnr(current), current->comm);
36287@@ -2078,7 +2386,7 @@ close_fail:
36288 filp_close(cprm.file, NULL);
36289 fail_dropcount:
36290 if (ispipe)
36291- atomic_dec(&core_dump_count);
36292+ atomic_dec_unchecked(&core_dump_count);
36293 fail_unlock:
36294 kfree(cn.corename);
36295 fail_corename:
36296diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36297--- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36298+++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36299@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36300
36301 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36302 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36303- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36304+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36305 sbi->s_resuid != current_fsuid() &&
36306 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36307 return 0;
36308diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36309--- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36310+++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36311@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36312
36313 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36314 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36315- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36316+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36317 sbi->s_resuid != current_fsuid() &&
36318 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36319 return 0;
36320diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36321--- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36322+++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36323@@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36324 /* Hm, nope. Are (enough) root reserved blocks available? */
36325 if (sbi->s_resuid == current_fsuid() ||
36326 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36327- capable(CAP_SYS_RESOURCE)) {
36328+ capable_nolog(CAP_SYS_RESOURCE)) {
36329 if (free_blocks >= (nblocks + dirty_blocks))
36330 return 1;
36331 }
36332diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36333--- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36334+++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36335@@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36336 unsigned long s_mb_last_start;
36337
36338 /* stats for buddy allocator */
36339- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36340- atomic_t s_bal_success; /* we found long enough chunks */
36341- atomic_t s_bal_allocated; /* in blocks */
36342- atomic_t s_bal_ex_scanned; /* total extents scanned */
36343- atomic_t s_bal_goals; /* goal hits */
36344- atomic_t s_bal_breaks; /* too long searches */
36345- atomic_t s_bal_2orders; /* 2^order hits */
36346+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36347+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36348+ atomic_unchecked_t s_bal_allocated; /* in blocks */
36349+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36350+ atomic_unchecked_t s_bal_goals; /* goal hits */
36351+ atomic_unchecked_t s_bal_breaks; /* too long searches */
36352+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36353 spinlock_t s_bal_lock;
36354 unsigned long s_mb_buddies_generated;
36355 unsigned long long s_mb_generation_time;
36356- atomic_t s_mb_lost_chunks;
36357- atomic_t s_mb_preallocated;
36358- atomic_t s_mb_discarded;
36359+ atomic_unchecked_t s_mb_lost_chunks;
36360+ atomic_unchecked_t s_mb_preallocated;
36361+ atomic_unchecked_t s_mb_discarded;
36362 atomic_t s_lock_busy;
36363
36364 /* locality groups */
36365diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36366--- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36367+++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36368@@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36369 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36370
36371 if (EXT4_SB(sb)->s_mb_stats)
36372- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36373+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36374
36375 break;
36376 }
36377@@ -2147,7 +2147,7 @@ repeat:
36378 ac->ac_status = AC_STATUS_CONTINUE;
36379 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36380 cr = 3;
36381- atomic_inc(&sbi->s_mb_lost_chunks);
36382+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36383 goto repeat;
36384 }
36385 }
36386@@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36387 ext4_grpblk_t counters[16];
36388 } sg;
36389
36390+ pax_track_stack();
36391+
36392 group--;
36393 if (group == 0)
36394 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36395@@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36396 if (sbi->s_mb_stats) {
36397 printk(KERN_INFO
36398 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36399- atomic_read(&sbi->s_bal_allocated),
36400- atomic_read(&sbi->s_bal_reqs),
36401- atomic_read(&sbi->s_bal_success));
36402+ atomic_read_unchecked(&sbi->s_bal_allocated),
36403+ atomic_read_unchecked(&sbi->s_bal_reqs),
36404+ atomic_read_unchecked(&sbi->s_bal_success));
36405 printk(KERN_INFO
36406 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36407 "%u 2^N hits, %u breaks, %u lost\n",
36408- atomic_read(&sbi->s_bal_ex_scanned),
36409- atomic_read(&sbi->s_bal_goals),
36410- atomic_read(&sbi->s_bal_2orders),
36411- atomic_read(&sbi->s_bal_breaks),
36412- atomic_read(&sbi->s_mb_lost_chunks));
36413+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36414+ atomic_read_unchecked(&sbi->s_bal_goals),
36415+ atomic_read_unchecked(&sbi->s_bal_2orders),
36416+ atomic_read_unchecked(&sbi->s_bal_breaks),
36417+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36418 printk(KERN_INFO
36419 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36420 sbi->s_mb_buddies_generated++,
36421 sbi->s_mb_generation_time);
36422 printk(KERN_INFO
36423 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36424- atomic_read(&sbi->s_mb_preallocated),
36425- atomic_read(&sbi->s_mb_discarded));
36426+ atomic_read_unchecked(&sbi->s_mb_preallocated),
36427+ atomic_read_unchecked(&sbi->s_mb_discarded));
36428 }
36429
36430 free_percpu(sbi->s_locality_groups);
36431@@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36432 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36433
36434 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36435- atomic_inc(&sbi->s_bal_reqs);
36436- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36437+ atomic_inc_unchecked(&sbi->s_bal_reqs);
36438+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36439 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36440- atomic_inc(&sbi->s_bal_success);
36441- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36442+ atomic_inc_unchecked(&sbi->s_bal_success);
36443+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36444 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36445 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36446- atomic_inc(&sbi->s_bal_goals);
36447+ atomic_inc_unchecked(&sbi->s_bal_goals);
36448 if (ac->ac_found > sbi->s_mb_max_to_scan)
36449- atomic_inc(&sbi->s_bal_breaks);
36450+ atomic_inc_unchecked(&sbi->s_bal_breaks);
36451 }
36452
36453 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36454@@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36455 trace_ext4_mb_new_inode_pa(ac, pa);
36456
36457 ext4_mb_use_inode_pa(ac, pa);
36458- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36459+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36460
36461 ei = EXT4_I(ac->ac_inode);
36462 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36463@@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36464 trace_ext4_mb_new_group_pa(ac, pa);
36465
36466 ext4_mb_use_group_pa(ac, pa);
36467- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36468+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36469
36470 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36471 lg = ac->ac_lg;
36472@@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36473 * from the bitmap and continue.
36474 */
36475 }
36476- atomic_add(free, &sbi->s_mb_discarded);
36477+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
36478
36479 return err;
36480 }
36481@@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36482 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36483 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36484 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36485- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36486+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36487 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36488
36489 return 0;
36490diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36491--- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36492+++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36493@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36494 if (err)
36495 return err;
36496
36497+ if (gr_handle_chroot_fowner(pid, type))
36498+ return -ENOENT;
36499+ if (gr_check_protected_task_fowner(pid, type))
36500+ return -EACCES;
36501+
36502 f_modown(filp, pid, type, force);
36503 return 0;
36504 }
36505@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36506 switch (cmd) {
36507 case F_DUPFD:
36508 case F_DUPFD_CLOEXEC:
36509+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36510 if (arg >= rlimit(RLIMIT_NOFILE))
36511 break;
36512 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36513@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36514 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36515 * is defined as O_NONBLOCK on some platforms and not on others.
36516 */
36517- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36518+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36519 O_RDONLY | O_WRONLY | O_RDWR |
36520 O_CREAT | O_EXCL | O_NOCTTY |
36521 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36522 __O_SYNC | O_DSYNC | FASYNC |
36523 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36524 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36525- __FMODE_EXEC | O_PATH
36526+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
36527 ));
36528
36529 fasync_cache = kmem_cache_create("fasync_cache",
36530diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36531--- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36532+++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36533@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36534 */
36535 filp->f_op = &read_pipefifo_fops;
36536 pipe->r_counter++;
36537- if (pipe->readers++ == 0)
36538+ if (atomic_inc_return(&pipe->readers) == 1)
36539 wake_up_partner(inode);
36540
36541- if (!pipe->writers) {
36542+ if (!atomic_read(&pipe->writers)) {
36543 if ((filp->f_flags & O_NONBLOCK)) {
36544 /* suppress POLLHUP until we have
36545 * seen a writer */
36546@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36547 * errno=ENXIO when there is no process reading the FIFO.
36548 */
36549 ret = -ENXIO;
36550- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36551+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36552 goto err;
36553
36554 filp->f_op = &write_pipefifo_fops;
36555 pipe->w_counter++;
36556- if (!pipe->writers++)
36557+ if (atomic_inc_return(&pipe->writers) == 1)
36558 wake_up_partner(inode);
36559
36560- if (!pipe->readers) {
36561+ if (!atomic_read(&pipe->readers)) {
36562 wait_for_partner(inode, &pipe->r_counter);
36563 if (signal_pending(current))
36564 goto err_wr;
36565@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36566 */
36567 filp->f_op = &rdwr_pipefifo_fops;
36568
36569- pipe->readers++;
36570- pipe->writers++;
36571+ atomic_inc(&pipe->readers);
36572+ atomic_inc(&pipe->writers);
36573 pipe->r_counter++;
36574 pipe->w_counter++;
36575- if (pipe->readers == 1 || pipe->writers == 1)
36576+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36577 wake_up_partner(inode);
36578 break;
36579
36580@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36581 return 0;
36582
36583 err_rd:
36584- if (!--pipe->readers)
36585+ if (atomic_dec_and_test(&pipe->readers))
36586 wake_up_interruptible(&pipe->wait);
36587 ret = -ERESTARTSYS;
36588 goto err;
36589
36590 err_wr:
36591- if (!--pipe->writers)
36592+ if (atomic_dec_and_test(&pipe->writers))
36593 wake_up_interruptible(&pipe->wait);
36594 ret = -ERESTARTSYS;
36595 goto err;
36596
36597 err:
36598- if (!pipe->readers && !pipe->writers)
36599+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36600 free_pipe_info(inode);
36601
36602 err_nocleanup:
36603diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36604--- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36605+++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36606@@ -15,6 +15,7 @@
36607 #include <linux/slab.h>
36608 #include <linux/vmalloc.h>
36609 #include <linux/file.h>
36610+#include <linux/security.h>
36611 #include <linux/fdtable.h>
36612 #include <linux/bitops.h>
36613 #include <linux/interrupt.h>
36614@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36615 * N.B. For clone tasks sharing a files structure, this test
36616 * will limit the total number of files that can be opened.
36617 */
36618+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36619 if (nr >= rlimit(RLIMIT_NOFILE))
36620 return -EMFILE;
36621
36622diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36623--- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36624+++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36625@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36626 int len = dot ? dot - name : strlen(name);
36627
36628 fs = __get_fs_type(name, len);
36629+
36630+#ifdef CONFIG_GRKERNSEC_MODHARDEN
36631+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36632+#else
36633 if (!fs && (request_module("%.*s", len, name) == 0))
36634+#endif
36635 fs = __get_fs_type(name, len);
36636
36637 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36638diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36639--- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36640+++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36641@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36642 parent ? (char *) parent->def->name : "<no-parent>",
36643 def->name, netfs_data);
36644
36645- fscache_stat(&fscache_n_acquires);
36646+ fscache_stat_unchecked(&fscache_n_acquires);
36647
36648 /* if there's no parent cookie, then we don't create one here either */
36649 if (!parent) {
36650- fscache_stat(&fscache_n_acquires_null);
36651+ fscache_stat_unchecked(&fscache_n_acquires_null);
36652 _leave(" [no parent]");
36653 return NULL;
36654 }
36655@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36656 /* allocate and initialise a cookie */
36657 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36658 if (!cookie) {
36659- fscache_stat(&fscache_n_acquires_oom);
36660+ fscache_stat_unchecked(&fscache_n_acquires_oom);
36661 _leave(" [ENOMEM]");
36662 return NULL;
36663 }
36664@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36665
36666 switch (cookie->def->type) {
36667 case FSCACHE_COOKIE_TYPE_INDEX:
36668- fscache_stat(&fscache_n_cookie_index);
36669+ fscache_stat_unchecked(&fscache_n_cookie_index);
36670 break;
36671 case FSCACHE_COOKIE_TYPE_DATAFILE:
36672- fscache_stat(&fscache_n_cookie_data);
36673+ fscache_stat_unchecked(&fscache_n_cookie_data);
36674 break;
36675 default:
36676- fscache_stat(&fscache_n_cookie_special);
36677+ fscache_stat_unchecked(&fscache_n_cookie_special);
36678 break;
36679 }
36680
36681@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36682 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36683 atomic_dec(&parent->n_children);
36684 __fscache_cookie_put(cookie);
36685- fscache_stat(&fscache_n_acquires_nobufs);
36686+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36687 _leave(" = NULL");
36688 return NULL;
36689 }
36690 }
36691
36692- fscache_stat(&fscache_n_acquires_ok);
36693+ fscache_stat_unchecked(&fscache_n_acquires_ok);
36694 _leave(" = %p", cookie);
36695 return cookie;
36696 }
36697@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36698 cache = fscache_select_cache_for_object(cookie->parent);
36699 if (!cache) {
36700 up_read(&fscache_addremove_sem);
36701- fscache_stat(&fscache_n_acquires_no_cache);
36702+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36703 _leave(" = -ENOMEDIUM [no cache]");
36704 return -ENOMEDIUM;
36705 }
36706@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36707 object = cache->ops->alloc_object(cache, cookie);
36708 fscache_stat_d(&fscache_n_cop_alloc_object);
36709 if (IS_ERR(object)) {
36710- fscache_stat(&fscache_n_object_no_alloc);
36711+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
36712 ret = PTR_ERR(object);
36713 goto error;
36714 }
36715
36716- fscache_stat(&fscache_n_object_alloc);
36717+ fscache_stat_unchecked(&fscache_n_object_alloc);
36718
36719 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36720
36721@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36722 struct fscache_object *object;
36723 struct hlist_node *_p;
36724
36725- fscache_stat(&fscache_n_updates);
36726+ fscache_stat_unchecked(&fscache_n_updates);
36727
36728 if (!cookie) {
36729- fscache_stat(&fscache_n_updates_null);
36730+ fscache_stat_unchecked(&fscache_n_updates_null);
36731 _leave(" [no cookie]");
36732 return;
36733 }
36734@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36735 struct fscache_object *object;
36736 unsigned long event;
36737
36738- fscache_stat(&fscache_n_relinquishes);
36739+ fscache_stat_unchecked(&fscache_n_relinquishes);
36740 if (retire)
36741- fscache_stat(&fscache_n_relinquishes_retire);
36742+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36743
36744 if (!cookie) {
36745- fscache_stat(&fscache_n_relinquishes_null);
36746+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
36747 _leave(" [no cookie]");
36748 return;
36749 }
36750@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36751
36752 /* wait for the cookie to finish being instantiated (or to fail) */
36753 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36754- fscache_stat(&fscache_n_relinquishes_waitcrt);
36755+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36756 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36757 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36758 }
36759diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36760--- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36761+++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36762@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36763 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36764 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36765
36766-extern atomic_t fscache_n_op_pend;
36767-extern atomic_t fscache_n_op_run;
36768-extern atomic_t fscache_n_op_enqueue;
36769-extern atomic_t fscache_n_op_deferred_release;
36770-extern atomic_t fscache_n_op_release;
36771-extern atomic_t fscache_n_op_gc;
36772-extern atomic_t fscache_n_op_cancelled;
36773-extern atomic_t fscache_n_op_rejected;
36774-
36775-extern atomic_t fscache_n_attr_changed;
36776-extern atomic_t fscache_n_attr_changed_ok;
36777-extern atomic_t fscache_n_attr_changed_nobufs;
36778-extern atomic_t fscache_n_attr_changed_nomem;
36779-extern atomic_t fscache_n_attr_changed_calls;
36780-
36781-extern atomic_t fscache_n_allocs;
36782-extern atomic_t fscache_n_allocs_ok;
36783-extern atomic_t fscache_n_allocs_wait;
36784-extern atomic_t fscache_n_allocs_nobufs;
36785-extern atomic_t fscache_n_allocs_intr;
36786-extern atomic_t fscache_n_allocs_object_dead;
36787-extern atomic_t fscache_n_alloc_ops;
36788-extern atomic_t fscache_n_alloc_op_waits;
36789-
36790-extern atomic_t fscache_n_retrievals;
36791-extern atomic_t fscache_n_retrievals_ok;
36792-extern atomic_t fscache_n_retrievals_wait;
36793-extern atomic_t fscache_n_retrievals_nodata;
36794-extern atomic_t fscache_n_retrievals_nobufs;
36795-extern atomic_t fscache_n_retrievals_intr;
36796-extern atomic_t fscache_n_retrievals_nomem;
36797-extern atomic_t fscache_n_retrievals_object_dead;
36798-extern atomic_t fscache_n_retrieval_ops;
36799-extern atomic_t fscache_n_retrieval_op_waits;
36800-
36801-extern atomic_t fscache_n_stores;
36802-extern atomic_t fscache_n_stores_ok;
36803-extern atomic_t fscache_n_stores_again;
36804-extern atomic_t fscache_n_stores_nobufs;
36805-extern atomic_t fscache_n_stores_oom;
36806-extern atomic_t fscache_n_store_ops;
36807-extern atomic_t fscache_n_store_calls;
36808-extern atomic_t fscache_n_store_pages;
36809-extern atomic_t fscache_n_store_radix_deletes;
36810-extern atomic_t fscache_n_store_pages_over_limit;
36811-
36812-extern atomic_t fscache_n_store_vmscan_not_storing;
36813-extern atomic_t fscache_n_store_vmscan_gone;
36814-extern atomic_t fscache_n_store_vmscan_busy;
36815-extern atomic_t fscache_n_store_vmscan_cancelled;
36816-
36817-extern atomic_t fscache_n_marks;
36818-extern atomic_t fscache_n_uncaches;
36819-
36820-extern atomic_t fscache_n_acquires;
36821-extern atomic_t fscache_n_acquires_null;
36822-extern atomic_t fscache_n_acquires_no_cache;
36823-extern atomic_t fscache_n_acquires_ok;
36824-extern atomic_t fscache_n_acquires_nobufs;
36825-extern atomic_t fscache_n_acquires_oom;
36826-
36827-extern atomic_t fscache_n_updates;
36828-extern atomic_t fscache_n_updates_null;
36829-extern atomic_t fscache_n_updates_run;
36830-
36831-extern atomic_t fscache_n_relinquishes;
36832-extern atomic_t fscache_n_relinquishes_null;
36833-extern atomic_t fscache_n_relinquishes_waitcrt;
36834-extern atomic_t fscache_n_relinquishes_retire;
36835-
36836-extern atomic_t fscache_n_cookie_index;
36837-extern atomic_t fscache_n_cookie_data;
36838-extern atomic_t fscache_n_cookie_special;
36839-
36840-extern atomic_t fscache_n_object_alloc;
36841-extern atomic_t fscache_n_object_no_alloc;
36842-extern atomic_t fscache_n_object_lookups;
36843-extern atomic_t fscache_n_object_lookups_negative;
36844-extern atomic_t fscache_n_object_lookups_positive;
36845-extern atomic_t fscache_n_object_lookups_timed_out;
36846-extern atomic_t fscache_n_object_created;
36847-extern atomic_t fscache_n_object_avail;
36848-extern atomic_t fscache_n_object_dead;
36849-
36850-extern atomic_t fscache_n_checkaux_none;
36851-extern atomic_t fscache_n_checkaux_okay;
36852-extern atomic_t fscache_n_checkaux_update;
36853-extern atomic_t fscache_n_checkaux_obsolete;
36854+extern atomic_unchecked_t fscache_n_op_pend;
36855+extern atomic_unchecked_t fscache_n_op_run;
36856+extern atomic_unchecked_t fscache_n_op_enqueue;
36857+extern atomic_unchecked_t fscache_n_op_deferred_release;
36858+extern atomic_unchecked_t fscache_n_op_release;
36859+extern atomic_unchecked_t fscache_n_op_gc;
36860+extern atomic_unchecked_t fscache_n_op_cancelled;
36861+extern atomic_unchecked_t fscache_n_op_rejected;
36862+
36863+extern atomic_unchecked_t fscache_n_attr_changed;
36864+extern atomic_unchecked_t fscache_n_attr_changed_ok;
36865+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36866+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36867+extern atomic_unchecked_t fscache_n_attr_changed_calls;
36868+
36869+extern atomic_unchecked_t fscache_n_allocs;
36870+extern atomic_unchecked_t fscache_n_allocs_ok;
36871+extern atomic_unchecked_t fscache_n_allocs_wait;
36872+extern atomic_unchecked_t fscache_n_allocs_nobufs;
36873+extern atomic_unchecked_t fscache_n_allocs_intr;
36874+extern atomic_unchecked_t fscache_n_allocs_object_dead;
36875+extern atomic_unchecked_t fscache_n_alloc_ops;
36876+extern atomic_unchecked_t fscache_n_alloc_op_waits;
36877+
36878+extern atomic_unchecked_t fscache_n_retrievals;
36879+extern atomic_unchecked_t fscache_n_retrievals_ok;
36880+extern atomic_unchecked_t fscache_n_retrievals_wait;
36881+extern atomic_unchecked_t fscache_n_retrievals_nodata;
36882+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36883+extern atomic_unchecked_t fscache_n_retrievals_intr;
36884+extern atomic_unchecked_t fscache_n_retrievals_nomem;
36885+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36886+extern atomic_unchecked_t fscache_n_retrieval_ops;
36887+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36888+
36889+extern atomic_unchecked_t fscache_n_stores;
36890+extern atomic_unchecked_t fscache_n_stores_ok;
36891+extern atomic_unchecked_t fscache_n_stores_again;
36892+extern atomic_unchecked_t fscache_n_stores_nobufs;
36893+extern atomic_unchecked_t fscache_n_stores_oom;
36894+extern atomic_unchecked_t fscache_n_store_ops;
36895+extern atomic_unchecked_t fscache_n_store_calls;
36896+extern atomic_unchecked_t fscache_n_store_pages;
36897+extern atomic_unchecked_t fscache_n_store_radix_deletes;
36898+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36899+
36900+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36901+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36902+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36903+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36904+
36905+extern atomic_unchecked_t fscache_n_marks;
36906+extern atomic_unchecked_t fscache_n_uncaches;
36907+
36908+extern atomic_unchecked_t fscache_n_acquires;
36909+extern atomic_unchecked_t fscache_n_acquires_null;
36910+extern atomic_unchecked_t fscache_n_acquires_no_cache;
36911+extern atomic_unchecked_t fscache_n_acquires_ok;
36912+extern atomic_unchecked_t fscache_n_acquires_nobufs;
36913+extern atomic_unchecked_t fscache_n_acquires_oom;
36914+
36915+extern atomic_unchecked_t fscache_n_updates;
36916+extern atomic_unchecked_t fscache_n_updates_null;
36917+extern atomic_unchecked_t fscache_n_updates_run;
36918+
36919+extern atomic_unchecked_t fscache_n_relinquishes;
36920+extern atomic_unchecked_t fscache_n_relinquishes_null;
36921+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36922+extern atomic_unchecked_t fscache_n_relinquishes_retire;
36923+
36924+extern atomic_unchecked_t fscache_n_cookie_index;
36925+extern atomic_unchecked_t fscache_n_cookie_data;
36926+extern atomic_unchecked_t fscache_n_cookie_special;
36927+
36928+extern atomic_unchecked_t fscache_n_object_alloc;
36929+extern atomic_unchecked_t fscache_n_object_no_alloc;
36930+extern atomic_unchecked_t fscache_n_object_lookups;
36931+extern atomic_unchecked_t fscache_n_object_lookups_negative;
36932+extern atomic_unchecked_t fscache_n_object_lookups_positive;
36933+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36934+extern atomic_unchecked_t fscache_n_object_created;
36935+extern atomic_unchecked_t fscache_n_object_avail;
36936+extern atomic_unchecked_t fscache_n_object_dead;
36937+
36938+extern atomic_unchecked_t fscache_n_checkaux_none;
36939+extern atomic_unchecked_t fscache_n_checkaux_okay;
36940+extern atomic_unchecked_t fscache_n_checkaux_update;
36941+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36942
36943 extern atomic_t fscache_n_cop_alloc_object;
36944 extern atomic_t fscache_n_cop_lookup_object;
36945@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36946 atomic_inc(stat);
36947 }
36948
36949+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36950+{
36951+ atomic_inc_unchecked(stat);
36952+}
36953+
36954 static inline void fscache_stat_d(atomic_t *stat)
36955 {
36956 atomic_dec(stat);
36957@@ -267,6 +272,7 @@ extern const struct file_operations fsca
36958
36959 #define __fscache_stat(stat) (NULL)
36960 #define fscache_stat(stat) do {} while (0)
36961+#define fscache_stat_unchecked(stat) do {} while (0)
36962 #define fscache_stat_d(stat) do {} while (0)
36963 #endif
36964
36965diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36966--- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36967+++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36968@@ -128,7 +128,7 @@ static void fscache_object_state_machine
36969 /* update the object metadata on disk */
36970 case FSCACHE_OBJECT_UPDATING:
36971 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36972- fscache_stat(&fscache_n_updates_run);
36973+ fscache_stat_unchecked(&fscache_n_updates_run);
36974 fscache_stat(&fscache_n_cop_update_object);
36975 object->cache->ops->update_object(object);
36976 fscache_stat_d(&fscache_n_cop_update_object);
36977@@ -217,7 +217,7 @@ static void fscache_object_state_machine
36978 spin_lock(&object->lock);
36979 object->state = FSCACHE_OBJECT_DEAD;
36980 spin_unlock(&object->lock);
36981- fscache_stat(&fscache_n_object_dead);
36982+ fscache_stat_unchecked(&fscache_n_object_dead);
36983 goto terminal_transit;
36984
36985 /* handle the parent cache of this object being withdrawn from
36986@@ -232,7 +232,7 @@ static void fscache_object_state_machine
36987 spin_lock(&object->lock);
36988 object->state = FSCACHE_OBJECT_DEAD;
36989 spin_unlock(&object->lock);
36990- fscache_stat(&fscache_n_object_dead);
36991+ fscache_stat_unchecked(&fscache_n_object_dead);
36992 goto terminal_transit;
36993
36994 /* complain about the object being woken up once it is
36995@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36996 parent->cookie->def->name, cookie->def->name,
36997 object->cache->tag->name);
36998
36999- fscache_stat(&fscache_n_object_lookups);
37000+ fscache_stat_unchecked(&fscache_n_object_lookups);
37001 fscache_stat(&fscache_n_cop_lookup_object);
37002 ret = object->cache->ops->lookup_object(object);
37003 fscache_stat_d(&fscache_n_cop_lookup_object);
37004@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
37005 if (ret == -ETIMEDOUT) {
37006 /* probably stuck behind another object, so move this one to
37007 * the back of the queue */
37008- fscache_stat(&fscache_n_object_lookups_timed_out);
37009+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
37010 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37011 }
37012
37013@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
37014
37015 spin_lock(&object->lock);
37016 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37017- fscache_stat(&fscache_n_object_lookups_negative);
37018+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
37019
37020 /* transit here to allow write requests to begin stacking up
37021 * and read requests to begin returning ENODATA */
37022@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
37023 * result, in which case there may be data available */
37024 spin_lock(&object->lock);
37025 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37026- fscache_stat(&fscache_n_object_lookups_positive);
37027+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
37028
37029 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
37030
37031@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
37032 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37033 } else {
37034 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
37035- fscache_stat(&fscache_n_object_created);
37036+ fscache_stat_unchecked(&fscache_n_object_created);
37037
37038 object->state = FSCACHE_OBJECT_AVAILABLE;
37039 spin_unlock(&object->lock);
37040@@ -602,7 +602,7 @@ static void fscache_object_available(str
37041 fscache_enqueue_dependents(object);
37042
37043 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
37044- fscache_stat(&fscache_n_object_avail);
37045+ fscache_stat_unchecked(&fscache_n_object_avail);
37046
37047 _leave("");
37048 }
37049@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
37050 enum fscache_checkaux result;
37051
37052 if (!object->cookie->def->check_aux) {
37053- fscache_stat(&fscache_n_checkaux_none);
37054+ fscache_stat_unchecked(&fscache_n_checkaux_none);
37055 return FSCACHE_CHECKAUX_OKAY;
37056 }
37057
37058@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
37059 switch (result) {
37060 /* entry okay as is */
37061 case FSCACHE_CHECKAUX_OKAY:
37062- fscache_stat(&fscache_n_checkaux_okay);
37063+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
37064 break;
37065
37066 /* entry requires update */
37067 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
37068- fscache_stat(&fscache_n_checkaux_update);
37069+ fscache_stat_unchecked(&fscache_n_checkaux_update);
37070 break;
37071
37072 /* entry requires deletion */
37073 case FSCACHE_CHECKAUX_OBSOLETE:
37074- fscache_stat(&fscache_n_checkaux_obsolete);
37075+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37076 break;
37077
37078 default:
37079diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
37080--- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
37081+++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
37082@@ -17,7 +17,7 @@
37083 #include <linux/slab.h>
37084 #include "internal.h"
37085
37086-atomic_t fscache_op_debug_id;
37087+atomic_unchecked_t fscache_op_debug_id;
37088 EXPORT_SYMBOL(fscache_op_debug_id);
37089
37090 /**
37091@@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
37092 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37093 ASSERTCMP(atomic_read(&op->usage), >, 0);
37094
37095- fscache_stat(&fscache_n_op_enqueue);
37096+ fscache_stat_unchecked(&fscache_n_op_enqueue);
37097 switch (op->flags & FSCACHE_OP_TYPE) {
37098 case FSCACHE_OP_ASYNC:
37099 _debug("queue async");
37100@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
37101 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37102 if (op->processor)
37103 fscache_enqueue_operation(op);
37104- fscache_stat(&fscache_n_op_run);
37105+ fscache_stat_unchecked(&fscache_n_op_run);
37106 }
37107
37108 /*
37109@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
37110 if (object->n_ops > 1) {
37111 atomic_inc(&op->usage);
37112 list_add_tail(&op->pend_link, &object->pending_ops);
37113- fscache_stat(&fscache_n_op_pend);
37114+ fscache_stat_unchecked(&fscache_n_op_pend);
37115 } else if (!list_empty(&object->pending_ops)) {
37116 atomic_inc(&op->usage);
37117 list_add_tail(&op->pend_link, &object->pending_ops);
37118- fscache_stat(&fscache_n_op_pend);
37119+ fscache_stat_unchecked(&fscache_n_op_pend);
37120 fscache_start_operations(object);
37121 } else {
37122 ASSERTCMP(object->n_in_progress, ==, 0);
37123@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
37124 object->n_exclusive++; /* reads and writes must wait */
37125 atomic_inc(&op->usage);
37126 list_add_tail(&op->pend_link, &object->pending_ops);
37127- fscache_stat(&fscache_n_op_pend);
37128+ fscache_stat_unchecked(&fscache_n_op_pend);
37129 ret = 0;
37130 } else {
37131 /* not allowed to submit ops in any other state */
37132@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
37133 if (object->n_exclusive > 0) {
37134 atomic_inc(&op->usage);
37135 list_add_tail(&op->pend_link, &object->pending_ops);
37136- fscache_stat(&fscache_n_op_pend);
37137+ fscache_stat_unchecked(&fscache_n_op_pend);
37138 } else if (!list_empty(&object->pending_ops)) {
37139 atomic_inc(&op->usage);
37140 list_add_tail(&op->pend_link, &object->pending_ops);
37141- fscache_stat(&fscache_n_op_pend);
37142+ fscache_stat_unchecked(&fscache_n_op_pend);
37143 fscache_start_operations(object);
37144 } else {
37145 ASSERTCMP(object->n_exclusive, ==, 0);
37146@@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37147 object->n_ops++;
37148 atomic_inc(&op->usage);
37149 list_add_tail(&op->pend_link, &object->pending_ops);
37150- fscache_stat(&fscache_n_op_pend);
37151+ fscache_stat_unchecked(&fscache_n_op_pend);
37152 ret = 0;
37153 } else if (object->state == FSCACHE_OBJECT_DYING ||
37154 object->state == FSCACHE_OBJECT_LC_DYING ||
37155 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37156- fscache_stat(&fscache_n_op_rejected);
37157+ fscache_stat_unchecked(&fscache_n_op_rejected);
37158 ret = -ENOBUFS;
37159 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37160 fscache_report_unexpected_submission(object, op, ostate);
37161@@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37162
37163 ret = -EBUSY;
37164 if (!list_empty(&op->pend_link)) {
37165- fscache_stat(&fscache_n_op_cancelled);
37166+ fscache_stat_unchecked(&fscache_n_op_cancelled);
37167 list_del_init(&op->pend_link);
37168 object->n_ops--;
37169 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37170@@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37171 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37172 BUG();
37173
37174- fscache_stat(&fscache_n_op_release);
37175+ fscache_stat_unchecked(&fscache_n_op_release);
37176
37177 if (op->release) {
37178 op->release(op);
37179@@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37180 * lock, and defer it otherwise */
37181 if (!spin_trylock(&object->lock)) {
37182 _debug("defer put");
37183- fscache_stat(&fscache_n_op_deferred_release);
37184+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
37185
37186 cache = object->cache;
37187 spin_lock(&cache->op_gc_list_lock);
37188@@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37189
37190 _debug("GC DEFERRED REL OBJ%x OP%x",
37191 object->debug_id, op->debug_id);
37192- fscache_stat(&fscache_n_op_gc);
37193+ fscache_stat_unchecked(&fscache_n_op_gc);
37194
37195 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37196
37197diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37198--- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37199+++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37200@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37201 val = radix_tree_lookup(&cookie->stores, page->index);
37202 if (!val) {
37203 rcu_read_unlock();
37204- fscache_stat(&fscache_n_store_vmscan_not_storing);
37205+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37206 __fscache_uncache_page(cookie, page);
37207 return true;
37208 }
37209@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37210 spin_unlock(&cookie->stores_lock);
37211
37212 if (xpage) {
37213- fscache_stat(&fscache_n_store_vmscan_cancelled);
37214- fscache_stat(&fscache_n_store_radix_deletes);
37215+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37216+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37217 ASSERTCMP(xpage, ==, page);
37218 } else {
37219- fscache_stat(&fscache_n_store_vmscan_gone);
37220+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37221 }
37222
37223 wake_up_bit(&cookie->flags, 0);
37224@@ -107,7 +107,7 @@ page_busy:
37225 /* we might want to wait here, but that could deadlock the allocator as
37226 * the work threads writing to the cache may all end up sleeping
37227 * on memory allocation */
37228- fscache_stat(&fscache_n_store_vmscan_busy);
37229+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37230 return false;
37231 }
37232 EXPORT_SYMBOL(__fscache_maybe_release_page);
37233@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37234 FSCACHE_COOKIE_STORING_TAG);
37235 if (!radix_tree_tag_get(&cookie->stores, page->index,
37236 FSCACHE_COOKIE_PENDING_TAG)) {
37237- fscache_stat(&fscache_n_store_radix_deletes);
37238+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37239 xpage = radix_tree_delete(&cookie->stores, page->index);
37240 }
37241 spin_unlock(&cookie->stores_lock);
37242@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37243
37244 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37245
37246- fscache_stat(&fscache_n_attr_changed_calls);
37247+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37248
37249 if (fscache_object_is_active(object)) {
37250 fscache_set_op_state(op, "CallFS");
37251@@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37252
37253 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37254
37255- fscache_stat(&fscache_n_attr_changed);
37256+ fscache_stat_unchecked(&fscache_n_attr_changed);
37257
37258 op = kzalloc(sizeof(*op), GFP_KERNEL);
37259 if (!op) {
37260- fscache_stat(&fscache_n_attr_changed_nomem);
37261+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37262 _leave(" = -ENOMEM");
37263 return -ENOMEM;
37264 }
37265@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37266 if (fscache_submit_exclusive_op(object, op) < 0)
37267 goto nobufs;
37268 spin_unlock(&cookie->lock);
37269- fscache_stat(&fscache_n_attr_changed_ok);
37270+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37271 fscache_put_operation(op);
37272 _leave(" = 0");
37273 return 0;
37274@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37275 nobufs:
37276 spin_unlock(&cookie->lock);
37277 kfree(op);
37278- fscache_stat(&fscache_n_attr_changed_nobufs);
37279+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37280 _leave(" = %d", -ENOBUFS);
37281 return -ENOBUFS;
37282 }
37283@@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37284 /* allocate a retrieval operation and attempt to submit it */
37285 op = kzalloc(sizeof(*op), GFP_NOIO);
37286 if (!op) {
37287- fscache_stat(&fscache_n_retrievals_nomem);
37288+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37289 return NULL;
37290 }
37291
37292@@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37293 return 0;
37294 }
37295
37296- fscache_stat(&fscache_n_retrievals_wait);
37297+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
37298
37299 jif = jiffies;
37300 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37301 fscache_wait_bit_interruptible,
37302 TASK_INTERRUPTIBLE) != 0) {
37303- fscache_stat(&fscache_n_retrievals_intr);
37304+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37305 _leave(" = -ERESTARTSYS");
37306 return -ERESTARTSYS;
37307 }
37308@@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37309 */
37310 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37311 struct fscache_retrieval *op,
37312- atomic_t *stat_op_waits,
37313- atomic_t *stat_object_dead)
37314+ atomic_unchecked_t *stat_op_waits,
37315+ atomic_unchecked_t *stat_object_dead)
37316 {
37317 int ret;
37318
37319@@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37320 goto check_if_dead;
37321
37322 _debug(">>> WT");
37323- fscache_stat(stat_op_waits);
37324+ fscache_stat_unchecked(stat_op_waits);
37325 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37326 fscache_wait_bit_interruptible,
37327 TASK_INTERRUPTIBLE) < 0) {
37328@@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37329
37330 check_if_dead:
37331 if (unlikely(fscache_object_is_dead(object))) {
37332- fscache_stat(stat_object_dead);
37333+ fscache_stat_unchecked(stat_object_dead);
37334 return -ENOBUFS;
37335 }
37336 return 0;
37337@@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37338
37339 _enter("%p,%p,,,", cookie, page);
37340
37341- fscache_stat(&fscache_n_retrievals);
37342+ fscache_stat_unchecked(&fscache_n_retrievals);
37343
37344 if (hlist_empty(&cookie->backing_objects))
37345 goto nobufs;
37346@@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37347 goto nobufs_unlock;
37348 spin_unlock(&cookie->lock);
37349
37350- fscache_stat(&fscache_n_retrieval_ops);
37351+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37352
37353 /* pin the netfs read context in case we need to do the actual netfs
37354 * read because we've encountered a cache read failure */
37355@@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37356
37357 error:
37358 if (ret == -ENOMEM)
37359- fscache_stat(&fscache_n_retrievals_nomem);
37360+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37361 else if (ret == -ERESTARTSYS)
37362- fscache_stat(&fscache_n_retrievals_intr);
37363+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37364 else if (ret == -ENODATA)
37365- fscache_stat(&fscache_n_retrievals_nodata);
37366+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37367 else if (ret < 0)
37368- fscache_stat(&fscache_n_retrievals_nobufs);
37369+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37370 else
37371- fscache_stat(&fscache_n_retrievals_ok);
37372+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37373
37374 fscache_put_retrieval(op);
37375 _leave(" = %d", ret);
37376@@ -434,7 +434,7 @@ nobufs_unlock:
37377 spin_unlock(&cookie->lock);
37378 kfree(op);
37379 nobufs:
37380- fscache_stat(&fscache_n_retrievals_nobufs);
37381+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37382 _leave(" = -ENOBUFS");
37383 return -ENOBUFS;
37384 }
37385@@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37386
37387 _enter("%p,,%d,,,", cookie, *nr_pages);
37388
37389- fscache_stat(&fscache_n_retrievals);
37390+ fscache_stat_unchecked(&fscache_n_retrievals);
37391
37392 if (hlist_empty(&cookie->backing_objects))
37393 goto nobufs;
37394@@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37395 goto nobufs_unlock;
37396 spin_unlock(&cookie->lock);
37397
37398- fscache_stat(&fscache_n_retrieval_ops);
37399+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37400
37401 /* pin the netfs read context in case we need to do the actual netfs
37402 * read because we've encountered a cache read failure */
37403@@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37404
37405 error:
37406 if (ret == -ENOMEM)
37407- fscache_stat(&fscache_n_retrievals_nomem);
37408+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37409 else if (ret == -ERESTARTSYS)
37410- fscache_stat(&fscache_n_retrievals_intr);
37411+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37412 else if (ret == -ENODATA)
37413- fscache_stat(&fscache_n_retrievals_nodata);
37414+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37415 else if (ret < 0)
37416- fscache_stat(&fscache_n_retrievals_nobufs);
37417+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37418 else
37419- fscache_stat(&fscache_n_retrievals_ok);
37420+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37421
37422 fscache_put_retrieval(op);
37423 _leave(" = %d", ret);
37424@@ -551,7 +551,7 @@ nobufs_unlock:
37425 spin_unlock(&cookie->lock);
37426 kfree(op);
37427 nobufs:
37428- fscache_stat(&fscache_n_retrievals_nobufs);
37429+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37430 _leave(" = -ENOBUFS");
37431 return -ENOBUFS;
37432 }
37433@@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37434
37435 _enter("%p,%p,,,", cookie, page);
37436
37437- fscache_stat(&fscache_n_allocs);
37438+ fscache_stat_unchecked(&fscache_n_allocs);
37439
37440 if (hlist_empty(&cookie->backing_objects))
37441 goto nobufs;
37442@@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37443 goto nobufs_unlock;
37444 spin_unlock(&cookie->lock);
37445
37446- fscache_stat(&fscache_n_alloc_ops);
37447+ fscache_stat_unchecked(&fscache_n_alloc_ops);
37448
37449 ret = fscache_wait_for_retrieval_activation(
37450 object, op,
37451@@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37452
37453 error:
37454 if (ret == -ERESTARTSYS)
37455- fscache_stat(&fscache_n_allocs_intr);
37456+ fscache_stat_unchecked(&fscache_n_allocs_intr);
37457 else if (ret < 0)
37458- fscache_stat(&fscache_n_allocs_nobufs);
37459+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37460 else
37461- fscache_stat(&fscache_n_allocs_ok);
37462+ fscache_stat_unchecked(&fscache_n_allocs_ok);
37463
37464 fscache_put_retrieval(op);
37465 _leave(" = %d", ret);
37466@@ -632,7 +632,7 @@ nobufs_unlock:
37467 spin_unlock(&cookie->lock);
37468 kfree(op);
37469 nobufs:
37470- fscache_stat(&fscache_n_allocs_nobufs);
37471+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37472 _leave(" = -ENOBUFS");
37473 return -ENOBUFS;
37474 }
37475@@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37476
37477 spin_lock(&cookie->stores_lock);
37478
37479- fscache_stat(&fscache_n_store_calls);
37480+ fscache_stat_unchecked(&fscache_n_store_calls);
37481
37482 /* find a page to store */
37483 page = NULL;
37484@@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37485 page = results[0];
37486 _debug("gang %d [%lx]", n, page->index);
37487 if (page->index > op->store_limit) {
37488- fscache_stat(&fscache_n_store_pages_over_limit);
37489+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37490 goto superseded;
37491 }
37492
37493@@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37494 spin_unlock(&object->lock);
37495
37496 fscache_set_op_state(&op->op, "Store");
37497- fscache_stat(&fscache_n_store_pages);
37498+ fscache_stat_unchecked(&fscache_n_store_pages);
37499 fscache_stat(&fscache_n_cop_write_page);
37500 ret = object->cache->ops->write_page(op, page);
37501 fscache_stat_d(&fscache_n_cop_write_page);
37502@@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37503 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37504 ASSERT(PageFsCache(page));
37505
37506- fscache_stat(&fscache_n_stores);
37507+ fscache_stat_unchecked(&fscache_n_stores);
37508
37509 op = kzalloc(sizeof(*op), GFP_NOIO);
37510 if (!op)
37511@@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37512 spin_unlock(&cookie->stores_lock);
37513 spin_unlock(&object->lock);
37514
37515- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37516+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37517 op->store_limit = object->store_limit;
37518
37519 if (fscache_submit_op(object, &op->op) < 0)
37520@@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37521
37522 spin_unlock(&cookie->lock);
37523 radix_tree_preload_end();
37524- fscache_stat(&fscache_n_store_ops);
37525- fscache_stat(&fscache_n_stores_ok);
37526+ fscache_stat_unchecked(&fscache_n_store_ops);
37527+ fscache_stat_unchecked(&fscache_n_stores_ok);
37528
37529 /* the work queue now carries its own ref on the object */
37530 fscache_put_operation(&op->op);
37531@@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37532 return 0;
37533
37534 already_queued:
37535- fscache_stat(&fscache_n_stores_again);
37536+ fscache_stat_unchecked(&fscache_n_stores_again);
37537 already_pending:
37538 spin_unlock(&cookie->stores_lock);
37539 spin_unlock(&object->lock);
37540 spin_unlock(&cookie->lock);
37541 radix_tree_preload_end();
37542 kfree(op);
37543- fscache_stat(&fscache_n_stores_ok);
37544+ fscache_stat_unchecked(&fscache_n_stores_ok);
37545 _leave(" = 0");
37546 return 0;
37547
37548@@ -864,14 +864,14 @@ nobufs:
37549 spin_unlock(&cookie->lock);
37550 radix_tree_preload_end();
37551 kfree(op);
37552- fscache_stat(&fscache_n_stores_nobufs);
37553+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
37554 _leave(" = -ENOBUFS");
37555 return -ENOBUFS;
37556
37557 nomem_free:
37558 kfree(op);
37559 nomem:
37560- fscache_stat(&fscache_n_stores_oom);
37561+ fscache_stat_unchecked(&fscache_n_stores_oom);
37562 _leave(" = -ENOMEM");
37563 return -ENOMEM;
37564 }
37565@@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37566 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37567 ASSERTCMP(page, !=, NULL);
37568
37569- fscache_stat(&fscache_n_uncaches);
37570+ fscache_stat_unchecked(&fscache_n_uncaches);
37571
37572 /* cache withdrawal may beat us to it */
37573 if (!PageFsCache(page))
37574@@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37575 unsigned long loop;
37576
37577 #ifdef CONFIG_FSCACHE_STATS
37578- atomic_add(pagevec->nr, &fscache_n_marks);
37579+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37580 #endif
37581
37582 for (loop = 0; loop < pagevec->nr; loop++) {
37583diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37584--- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37585+++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37586@@ -18,95 +18,95 @@
37587 /*
37588 * operation counters
37589 */
37590-atomic_t fscache_n_op_pend;
37591-atomic_t fscache_n_op_run;
37592-atomic_t fscache_n_op_enqueue;
37593-atomic_t fscache_n_op_requeue;
37594-atomic_t fscache_n_op_deferred_release;
37595-atomic_t fscache_n_op_release;
37596-atomic_t fscache_n_op_gc;
37597-atomic_t fscache_n_op_cancelled;
37598-atomic_t fscache_n_op_rejected;
37599-
37600-atomic_t fscache_n_attr_changed;
37601-atomic_t fscache_n_attr_changed_ok;
37602-atomic_t fscache_n_attr_changed_nobufs;
37603-atomic_t fscache_n_attr_changed_nomem;
37604-atomic_t fscache_n_attr_changed_calls;
37605-
37606-atomic_t fscache_n_allocs;
37607-atomic_t fscache_n_allocs_ok;
37608-atomic_t fscache_n_allocs_wait;
37609-atomic_t fscache_n_allocs_nobufs;
37610-atomic_t fscache_n_allocs_intr;
37611-atomic_t fscache_n_allocs_object_dead;
37612-atomic_t fscache_n_alloc_ops;
37613-atomic_t fscache_n_alloc_op_waits;
37614-
37615-atomic_t fscache_n_retrievals;
37616-atomic_t fscache_n_retrievals_ok;
37617-atomic_t fscache_n_retrievals_wait;
37618-atomic_t fscache_n_retrievals_nodata;
37619-atomic_t fscache_n_retrievals_nobufs;
37620-atomic_t fscache_n_retrievals_intr;
37621-atomic_t fscache_n_retrievals_nomem;
37622-atomic_t fscache_n_retrievals_object_dead;
37623-atomic_t fscache_n_retrieval_ops;
37624-atomic_t fscache_n_retrieval_op_waits;
37625-
37626-atomic_t fscache_n_stores;
37627-atomic_t fscache_n_stores_ok;
37628-atomic_t fscache_n_stores_again;
37629-atomic_t fscache_n_stores_nobufs;
37630-atomic_t fscache_n_stores_oom;
37631-atomic_t fscache_n_store_ops;
37632-atomic_t fscache_n_store_calls;
37633-atomic_t fscache_n_store_pages;
37634-atomic_t fscache_n_store_radix_deletes;
37635-atomic_t fscache_n_store_pages_over_limit;
37636-
37637-atomic_t fscache_n_store_vmscan_not_storing;
37638-atomic_t fscache_n_store_vmscan_gone;
37639-atomic_t fscache_n_store_vmscan_busy;
37640-atomic_t fscache_n_store_vmscan_cancelled;
37641-
37642-atomic_t fscache_n_marks;
37643-atomic_t fscache_n_uncaches;
37644-
37645-atomic_t fscache_n_acquires;
37646-atomic_t fscache_n_acquires_null;
37647-atomic_t fscache_n_acquires_no_cache;
37648-atomic_t fscache_n_acquires_ok;
37649-atomic_t fscache_n_acquires_nobufs;
37650-atomic_t fscache_n_acquires_oom;
37651-
37652-atomic_t fscache_n_updates;
37653-atomic_t fscache_n_updates_null;
37654-atomic_t fscache_n_updates_run;
37655-
37656-atomic_t fscache_n_relinquishes;
37657-atomic_t fscache_n_relinquishes_null;
37658-atomic_t fscache_n_relinquishes_waitcrt;
37659-atomic_t fscache_n_relinquishes_retire;
37660-
37661-atomic_t fscache_n_cookie_index;
37662-atomic_t fscache_n_cookie_data;
37663-atomic_t fscache_n_cookie_special;
37664-
37665-atomic_t fscache_n_object_alloc;
37666-atomic_t fscache_n_object_no_alloc;
37667-atomic_t fscache_n_object_lookups;
37668-atomic_t fscache_n_object_lookups_negative;
37669-atomic_t fscache_n_object_lookups_positive;
37670-atomic_t fscache_n_object_lookups_timed_out;
37671-atomic_t fscache_n_object_created;
37672-atomic_t fscache_n_object_avail;
37673-atomic_t fscache_n_object_dead;
37674-
37675-atomic_t fscache_n_checkaux_none;
37676-atomic_t fscache_n_checkaux_okay;
37677-atomic_t fscache_n_checkaux_update;
37678-atomic_t fscache_n_checkaux_obsolete;
37679+atomic_unchecked_t fscache_n_op_pend;
37680+atomic_unchecked_t fscache_n_op_run;
37681+atomic_unchecked_t fscache_n_op_enqueue;
37682+atomic_unchecked_t fscache_n_op_requeue;
37683+atomic_unchecked_t fscache_n_op_deferred_release;
37684+atomic_unchecked_t fscache_n_op_release;
37685+atomic_unchecked_t fscache_n_op_gc;
37686+atomic_unchecked_t fscache_n_op_cancelled;
37687+atomic_unchecked_t fscache_n_op_rejected;
37688+
37689+atomic_unchecked_t fscache_n_attr_changed;
37690+atomic_unchecked_t fscache_n_attr_changed_ok;
37691+atomic_unchecked_t fscache_n_attr_changed_nobufs;
37692+atomic_unchecked_t fscache_n_attr_changed_nomem;
37693+atomic_unchecked_t fscache_n_attr_changed_calls;
37694+
37695+atomic_unchecked_t fscache_n_allocs;
37696+atomic_unchecked_t fscache_n_allocs_ok;
37697+atomic_unchecked_t fscache_n_allocs_wait;
37698+atomic_unchecked_t fscache_n_allocs_nobufs;
37699+atomic_unchecked_t fscache_n_allocs_intr;
37700+atomic_unchecked_t fscache_n_allocs_object_dead;
37701+atomic_unchecked_t fscache_n_alloc_ops;
37702+atomic_unchecked_t fscache_n_alloc_op_waits;
37703+
37704+atomic_unchecked_t fscache_n_retrievals;
37705+atomic_unchecked_t fscache_n_retrievals_ok;
37706+atomic_unchecked_t fscache_n_retrievals_wait;
37707+atomic_unchecked_t fscache_n_retrievals_nodata;
37708+atomic_unchecked_t fscache_n_retrievals_nobufs;
37709+atomic_unchecked_t fscache_n_retrievals_intr;
37710+atomic_unchecked_t fscache_n_retrievals_nomem;
37711+atomic_unchecked_t fscache_n_retrievals_object_dead;
37712+atomic_unchecked_t fscache_n_retrieval_ops;
37713+atomic_unchecked_t fscache_n_retrieval_op_waits;
37714+
37715+atomic_unchecked_t fscache_n_stores;
37716+atomic_unchecked_t fscache_n_stores_ok;
37717+atomic_unchecked_t fscache_n_stores_again;
37718+atomic_unchecked_t fscache_n_stores_nobufs;
37719+atomic_unchecked_t fscache_n_stores_oom;
37720+atomic_unchecked_t fscache_n_store_ops;
37721+atomic_unchecked_t fscache_n_store_calls;
37722+atomic_unchecked_t fscache_n_store_pages;
37723+atomic_unchecked_t fscache_n_store_radix_deletes;
37724+atomic_unchecked_t fscache_n_store_pages_over_limit;
37725+
37726+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37727+atomic_unchecked_t fscache_n_store_vmscan_gone;
37728+atomic_unchecked_t fscache_n_store_vmscan_busy;
37729+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37730+
37731+atomic_unchecked_t fscache_n_marks;
37732+atomic_unchecked_t fscache_n_uncaches;
37733+
37734+atomic_unchecked_t fscache_n_acquires;
37735+atomic_unchecked_t fscache_n_acquires_null;
37736+atomic_unchecked_t fscache_n_acquires_no_cache;
37737+atomic_unchecked_t fscache_n_acquires_ok;
37738+atomic_unchecked_t fscache_n_acquires_nobufs;
37739+atomic_unchecked_t fscache_n_acquires_oom;
37740+
37741+atomic_unchecked_t fscache_n_updates;
37742+atomic_unchecked_t fscache_n_updates_null;
37743+atomic_unchecked_t fscache_n_updates_run;
37744+
37745+atomic_unchecked_t fscache_n_relinquishes;
37746+atomic_unchecked_t fscache_n_relinquishes_null;
37747+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37748+atomic_unchecked_t fscache_n_relinquishes_retire;
37749+
37750+atomic_unchecked_t fscache_n_cookie_index;
37751+atomic_unchecked_t fscache_n_cookie_data;
37752+atomic_unchecked_t fscache_n_cookie_special;
37753+
37754+atomic_unchecked_t fscache_n_object_alloc;
37755+atomic_unchecked_t fscache_n_object_no_alloc;
37756+atomic_unchecked_t fscache_n_object_lookups;
37757+atomic_unchecked_t fscache_n_object_lookups_negative;
37758+atomic_unchecked_t fscache_n_object_lookups_positive;
37759+atomic_unchecked_t fscache_n_object_lookups_timed_out;
37760+atomic_unchecked_t fscache_n_object_created;
37761+atomic_unchecked_t fscache_n_object_avail;
37762+atomic_unchecked_t fscache_n_object_dead;
37763+
37764+atomic_unchecked_t fscache_n_checkaux_none;
37765+atomic_unchecked_t fscache_n_checkaux_okay;
37766+atomic_unchecked_t fscache_n_checkaux_update;
37767+atomic_unchecked_t fscache_n_checkaux_obsolete;
37768
37769 atomic_t fscache_n_cop_alloc_object;
37770 atomic_t fscache_n_cop_lookup_object;
37771@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37772 seq_puts(m, "FS-Cache statistics\n");
37773
37774 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37775- atomic_read(&fscache_n_cookie_index),
37776- atomic_read(&fscache_n_cookie_data),
37777- atomic_read(&fscache_n_cookie_special));
37778+ atomic_read_unchecked(&fscache_n_cookie_index),
37779+ atomic_read_unchecked(&fscache_n_cookie_data),
37780+ atomic_read_unchecked(&fscache_n_cookie_special));
37781
37782 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37783- atomic_read(&fscache_n_object_alloc),
37784- atomic_read(&fscache_n_object_no_alloc),
37785- atomic_read(&fscache_n_object_avail),
37786- atomic_read(&fscache_n_object_dead));
37787+ atomic_read_unchecked(&fscache_n_object_alloc),
37788+ atomic_read_unchecked(&fscache_n_object_no_alloc),
37789+ atomic_read_unchecked(&fscache_n_object_avail),
37790+ atomic_read_unchecked(&fscache_n_object_dead));
37791 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37792- atomic_read(&fscache_n_checkaux_none),
37793- atomic_read(&fscache_n_checkaux_okay),
37794- atomic_read(&fscache_n_checkaux_update),
37795- atomic_read(&fscache_n_checkaux_obsolete));
37796+ atomic_read_unchecked(&fscache_n_checkaux_none),
37797+ atomic_read_unchecked(&fscache_n_checkaux_okay),
37798+ atomic_read_unchecked(&fscache_n_checkaux_update),
37799+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37800
37801 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37802- atomic_read(&fscache_n_marks),
37803- atomic_read(&fscache_n_uncaches));
37804+ atomic_read_unchecked(&fscache_n_marks),
37805+ atomic_read_unchecked(&fscache_n_uncaches));
37806
37807 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37808 " oom=%u\n",
37809- atomic_read(&fscache_n_acquires),
37810- atomic_read(&fscache_n_acquires_null),
37811- atomic_read(&fscache_n_acquires_no_cache),
37812- atomic_read(&fscache_n_acquires_ok),
37813- atomic_read(&fscache_n_acquires_nobufs),
37814- atomic_read(&fscache_n_acquires_oom));
37815+ atomic_read_unchecked(&fscache_n_acquires),
37816+ atomic_read_unchecked(&fscache_n_acquires_null),
37817+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
37818+ atomic_read_unchecked(&fscache_n_acquires_ok),
37819+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
37820+ atomic_read_unchecked(&fscache_n_acquires_oom));
37821
37822 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37823- atomic_read(&fscache_n_object_lookups),
37824- atomic_read(&fscache_n_object_lookups_negative),
37825- atomic_read(&fscache_n_object_lookups_positive),
37826- atomic_read(&fscache_n_object_created),
37827- atomic_read(&fscache_n_object_lookups_timed_out));
37828+ atomic_read_unchecked(&fscache_n_object_lookups),
37829+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
37830+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
37831+ atomic_read_unchecked(&fscache_n_object_created),
37832+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37833
37834 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37835- atomic_read(&fscache_n_updates),
37836- atomic_read(&fscache_n_updates_null),
37837- atomic_read(&fscache_n_updates_run));
37838+ atomic_read_unchecked(&fscache_n_updates),
37839+ atomic_read_unchecked(&fscache_n_updates_null),
37840+ atomic_read_unchecked(&fscache_n_updates_run));
37841
37842 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37843- atomic_read(&fscache_n_relinquishes),
37844- atomic_read(&fscache_n_relinquishes_null),
37845- atomic_read(&fscache_n_relinquishes_waitcrt),
37846- atomic_read(&fscache_n_relinquishes_retire));
37847+ atomic_read_unchecked(&fscache_n_relinquishes),
37848+ atomic_read_unchecked(&fscache_n_relinquishes_null),
37849+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37850+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
37851
37852 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37853- atomic_read(&fscache_n_attr_changed),
37854- atomic_read(&fscache_n_attr_changed_ok),
37855- atomic_read(&fscache_n_attr_changed_nobufs),
37856- atomic_read(&fscache_n_attr_changed_nomem),
37857- atomic_read(&fscache_n_attr_changed_calls));
37858+ atomic_read_unchecked(&fscache_n_attr_changed),
37859+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
37860+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37861+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37862+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
37863
37864 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37865- atomic_read(&fscache_n_allocs),
37866- atomic_read(&fscache_n_allocs_ok),
37867- atomic_read(&fscache_n_allocs_wait),
37868- atomic_read(&fscache_n_allocs_nobufs),
37869- atomic_read(&fscache_n_allocs_intr));
37870+ atomic_read_unchecked(&fscache_n_allocs),
37871+ atomic_read_unchecked(&fscache_n_allocs_ok),
37872+ atomic_read_unchecked(&fscache_n_allocs_wait),
37873+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
37874+ atomic_read_unchecked(&fscache_n_allocs_intr));
37875 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37876- atomic_read(&fscache_n_alloc_ops),
37877- atomic_read(&fscache_n_alloc_op_waits),
37878- atomic_read(&fscache_n_allocs_object_dead));
37879+ atomic_read_unchecked(&fscache_n_alloc_ops),
37880+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
37881+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
37882
37883 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37884 " int=%u oom=%u\n",
37885- atomic_read(&fscache_n_retrievals),
37886- atomic_read(&fscache_n_retrievals_ok),
37887- atomic_read(&fscache_n_retrievals_wait),
37888- atomic_read(&fscache_n_retrievals_nodata),
37889- atomic_read(&fscache_n_retrievals_nobufs),
37890- atomic_read(&fscache_n_retrievals_intr),
37891- atomic_read(&fscache_n_retrievals_nomem));
37892+ atomic_read_unchecked(&fscache_n_retrievals),
37893+ atomic_read_unchecked(&fscache_n_retrievals_ok),
37894+ atomic_read_unchecked(&fscache_n_retrievals_wait),
37895+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
37896+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37897+ atomic_read_unchecked(&fscache_n_retrievals_intr),
37898+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
37899 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37900- atomic_read(&fscache_n_retrieval_ops),
37901- atomic_read(&fscache_n_retrieval_op_waits),
37902- atomic_read(&fscache_n_retrievals_object_dead));
37903+ atomic_read_unchecked(&fscache_n_retrieval_ops),
37904+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37905+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37906
37907 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37908- atomic_read(&fscache_n_stores),
37909- atomic_read(&fscache_n_stores_ok),
37910- atomic_read(&fscache_n_stores_again),
37911- atomic_read(&fscache_n_stores_nobufs),
37912- atomic_read(&fscache_n_stores_oom));
37913+ atomic_read_unchecked(&fscache_n_stores),
37914+ atomic_read_unchecked(&fscache_n_stores_ok),
37915+ atomic_read_unchecked(&fscache_n_stores_again),
37916+ atomic_read_unchecked(&fscache_n_stores_nobufs),
37917+ atomic_read_unchecked(&fscache_n_stores_oom));
37918 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37919- atomic_read(&fscache_n_store_ops),
37920- atomic_read(&fscache_n_store_calls),
37921- atomic_read(&fscache_n_store_pages),
37922- atomic_read(&fscache_n_store_radix_deletes),
37923- atomic_read(&fscache_n_store_pages_over_limit));
37924+ atomic_read_unchecked(&fscache_n_store_ops),
37925+ atomic_read_unchecked(&fscache_n_store_calls),
37926+ atomic_read_unchecked(&fscache_n_store_pages),
37927+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
37928+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37929
37930 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37931- atomic_read(&fscache_n_store_vmscan_not_storing),
37932- atomic_read(&fscache_n_store_vmscan_gone),
37933- atomic_read(&fscache_n_store_vmscan_busy),
37934- atomic_read(&fscache_n_store_vmscan_cancelled));
37935+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37936+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37937+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37938+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37939
37940 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37941- atomic_read(&fscache_n_op_pend),
37942- atomic_read(&fscache_n_op_run),
37943- atomic_read(&fscache_n_op_enqueue),
37944- atomic_read(&fscache_n_op_cancelled),
37945- atomic_read(&fscache_n_op_rejected));
37946+ atomic_read_unchecked(&fscache_n_op_pend),
37947+ atomic_read_unchecked(&fscache_n_op_run),
37948+ atomic_read_unchecked(&fscache_n_op_enqueue),
37949+ atomic_read_unchecked(&fscache_n_op_cancelled),
37950+ atomic_read_unchecked(&fscache_n_op_rejected));
37951 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37952- atomic_read(&fscache_n_op_deferred_release),
37953- atomic_read(&fscache_n_op_release),
37954- atomic_read(&fscache_n_op_gc));
37955+ atomic_read_unchecked(&fscache_n_op_deferred_release),
37956+ atomic_read_unchecked(&fscache_n_op_release),
37957+ atomic_read_unchecked(&fscache_n_op_gc));
37958
37959 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37960 atomic_read(&fscache_n_cop_alloc_object),
37961diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37962--- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37963+++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37964@@ -4,6 +4,7 @@
37965 #include <linux/path.h>
37966 #include <linux/slab.h>
37967 #include <linux/fs_struct.h>
37968+#include <linux/grsecurity.h>
37969 #include "internal.h"
37970
37971 static inline void path_get_longterm(struct path *path)
37972@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37973 old_root = fs->root;
37974 fs->root = *path;
37975 path_get_longterm(path);
37976+ gr_set_chroot_entries(current, path);
37977 write_seqcount_end(&fs->seq);
37978 spin_unlock(&fs->lock);
37979 if (old_root.dentry)
37980@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37981 && fs->root.mnt == old_root->mnt) {
37982 path_get_longterm(new_root);
37983 fs->root = *new_root;
37984+ gr_set_chroot_entries(p, new_root);
37985 count++;
37986 }
37987 if (fs->pwd.dentry == old_root->dentry
37988@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37989 spin_lock(&fs->lock);
37990 write_seqcount_begin(&fs->seq);
37991 tsk->fs = NULL;
37992- kill = !--fs->users;
37993+ gr_clear_chroot_entries(tsk);
37994+ kill = !atomic_dec_return(&fs->users);
37995 write_seqcount_end(&fs->seq);
37996 spin_unlock(&fs->lock);
37997 task_unlock(tsk);
37998@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37999 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
38000 /* We don't need to lock fs - think why ;-) */
38001 if (fs) {
38002- fs->users = 1;
38003+ atomic_set(&fs->users, 1);
38004 fs->in_exec = 0;
38005 spin_lock_init(&fs->lock);
38006 seqcount_init(&fs->seq);
38007@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
38008 spin_lock(&old->lock);
38009 fs->root = old->root;
38010 path_get_longterm(&fs->root);
38011+ /* instead of calling gr_set_chroot_entries here,
38012+ we call it from every caller of this function
38013+ */
38014 fs->pwd = old->pwd;
38015 path_get_longterm(&fs->pwd);
38016 spin_unlock(&old->lock);
38017@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
38018
38019 task_lock(current);
38020 spin_lock(&fs->lock);
38021- kill = !--fs->users;
38022+ kill = !atomic_dec_return(&fs->users);
38023 current->fs = new_fs;
38024+ gr_set_chroot_entries(current, &new_fs->root);
38025 spin_unlock(&fs->lock);
38026 task_unlock(current);
38027
38028@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
38029
38030 /* to be mentioned only in INIT_TASK */
38031 struct fs_struct init_fs = {
38032- .users = 1,
38033+ .users = ATOMIC_INIT(1),
38034 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
38035 .seq = SEQCNT_ZERO,
38036 .umask = 0022,
38037@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
38038 task_lock(current);
38039
38040 spin_lock(&init_fs.lock);
38041- init_fs.users++;
38042+ atomic_inc(&init_fs.users);
38043 spin_unlock(&init_fs.lock);
38044
38045 spin_lock(&fs->lock);
38046 current->fs = &init_fs;
38047- kill = !--fs->users;
38048+ gr_set_chroot_entries(current, &current->fs->root);
38049+ kill = !atomic_dec_return(&fs->users);
38050 spin_unlock(&fs->lock);
38051
38052 task_unlock(current);
38053diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
38054--- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
38055+++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
38056@@ -586,10 +586,12 @@ static int __init cuse_init(void)
38057 INIT_LIST_HEAD(&cuse_conntbl[i]);
38058
38059 /* inherit and extend fuse_dev_operations */
38060- cuse_channel_fops = fuse_dev_operations;
38061- cuse_channel_fops.owner = THIS_MODULE;
38062- cuse_channel_fops.open = cuse_channel_open;
38063- cuse_channel_fops.release = cuse_channel_release;
38064+ pax_open_kernel();
38065+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
38066+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
38067+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
38068+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
38069+ pax_close_kernel();
38070
38071 cuse_class = class_create(THIS_MODULE, "cuse");
38072 if (IS_ERR(cuse_class))
38073diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
38074--- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
38075+++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
38076@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38077 ret = 0;
38078 pipe_lock(pipe);
38079
38080- if (!pipe->readers) {
38081+ if (!atomic_read(&pipe->readers)) {
38082 send_sig(SIGPIPE, current, 0);
38083 if (!ret)
38084 ret = -EPIPE;
38085diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
38086--- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
38087+++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
38088@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
38089 return link;
38090 }
38091
38092-static void free_link(char *link)
38093+static void free_link(const char *link)
38094 {
38095 if (!IS_ERR(link))
38096 free_page((unsigned long) link);
38097diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
38098--- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
38099+++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
38100@@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
38101 unsigned int x;
38102 int error;
38103
38104+ pax_track_stack();
38105+
38106 if (ndentry->d_inode) {
38107 nip = GFS2_I(ndentry->d_inode);
38108 if (ip == nip)
38109@@ -1019,7 +1021,7 @@ out:
38110
38111 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38112 {
38113- char *s = nd_get_link(nd);
38114+ const char *s = nd_get_link(nd);
38115 if (!IS_ERR(s))
38116 kfree(s);
38117 }
38118diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
38119--- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
38120+++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
38121@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38122 int err;
38123 u16 type;
38124
38125+ pax_track_stack();
38126+
38127 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38128 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38129 if (err)
38130@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38131 int entry_size;
38132 int err;
38133
38134+ pax_track_stack();
38135+
38136 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38137 str->name, cnid, inode->i_nlink);
38138 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38139@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38140 int entry_size, type;
38141 int err = 0;
38142
38143+ pax_track_stack();
38144+
38145 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38146 cnid, src_dir->i_ino, src_name->name,
38147 dst_dir->i_ino, dst_name->name);
38148diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38149--- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38150+++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38151@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38152 struct hfsplus_readdir_data *rd;
38153 u16 type;
38154
38155+ pax_track_stack();
38156+
38157 if (filp->f_pos >= inode->i_size)
38158 return 0;
38159
38160diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38161--- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38162+++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38163@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38164 int res = 0;
38165 u16 type;
38166
38167+ pax_track_stack();
38168+
38169 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38170
38171 HFSPLUS_I(inode)->linkid = 0;
38172@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38173 struct hfs_find_data fd;
38174 hfsplus_cat_entry entry;
38175
38176+ pax_track_stack();
38177+
38178 if (HFSPLUS_IS_RSRC(inode))
38179 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38180
38181diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38182--- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38183+++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38184@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38185 struct hfsplus_cat_file *file;
38186 int res;
38187
38188+ pax_track_stack();
38189+
38190 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38191 return -EOPNOTSUPP;
38192
38193@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38194 struct hfsplus_cat_file *file;
38195 ssize_t res = 0;
38196
38197+ pax_track_stack();
38198+
38199 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38200 return -EOPNOTSUPP;
38201
38202diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38203--- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38204+++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38205@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38206 struct nls_table *nls = NULL;
38207 int err;
38208
38209+ pax_track_stack();
38210+
38211 err = -EINVAL;
38212 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38213 if (!sbi)
38214diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38215--- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38216+++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38217@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38218 .kill_sb = kill_litter_super,
38219 };
38220
38221-static struct vfsmount *hugetlbfs_vfsmount;
38222+struct vfsmount *hugetlbfs_vfsmount;
38223
38224 static int can_do_hugetlb_shm(void)
38225 {
38226diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38227--- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38228+++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38229@@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38230
38231 #ifdef CONFIG_SMP
38232 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38233- static atomic_t shared_last_ino;
38234- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38235+ static atomic_unchecked_t shared_last_ino;
38236+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38237
38238 res = next - LAST_INO_BATCH;
38239 }
38240diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38241--- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38242+++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38243@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38244 tid_t this_tid;
38245 int result;
38246
38247+ pax_track_stack();
38248+
38249 jbd_debug(1, "Start checkpoint\n");
38250
38251 /*
38252diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38253--- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38254+++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38255@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38256 int outpos = 0;
38257 int pos=0;
38258
38259+ pax_track_stack();
38260+
38261 memset(positions,0,sizeof(positions));
38262
38263 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38264@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38265 int outpos = 0;
38266 int pos=0;
38267
38268+ pax_track_stack();
38269+
38270 memset(positions,0,sizeof(positions));
38271
38272 while (outpos<destlen) {
38273diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38274--- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38275+++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38276@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38277 int ret;
38278 uint32_t mysrclen, mydstlen;
38279
38280+ pax_track_stack();
38281+
38282 mysrclen = *sourcelen;
38283 mydstlen = *dstlen - 8;
38284
38285diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38286--- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38287+++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38288@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38289 struct jffs2_unknown_node marker = {
38290 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38291 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38292- .totlen = cpu_to_je32(c->cleanmarker_size)
38293+ .totlen = cpu_to_je32(c->cleanmarker_size),
38294+ .hdr_crc = cpu_to_je32(0)
38295 };
38296
38297 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38298diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38299--- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38300+++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38301@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38302 {
38303 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38304 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38305- .totlen = constant_cpu_to_je32(8)
38306+ .totlen = constant_cpu_to_je32(8),
38307+ .hdr_crc = constant_cpu_to_je32(0)
38308 };
38309
38310 /*
38311diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38312--- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38313+++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38314@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38315
38316 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38317
38318+ pax_track_stack();
38319+
38320 /* Phase.1 : Merge same xref */
38321 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38322 xref_tmphash[i] = NULL;
38323diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38324--- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38325+++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38326@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38327
38328 jfs_inode_cachep =
38329 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38330- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38331+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38332 init_once);
38333 if (jfs_inode_cachep == NULL)
38334 return -ENOMEM;
38335diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38336--- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38337+++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38338@@ -86,7 +86,7 @@ config HAVE_AOUT
38339
38340 config BINFMT_AOUT
38341 tristate "Kernel support for a.out and ECOFF binaries"
38342- depends on HAVE_AOUT
38343+ depends on HAVE_AOUT && BROKEN
38344 ---help---
38345 A.out (Assembler.OUTput) is a set of formats for libraries and
38346 executables used in the earliest versions of UNIX. Linux used
38347diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38348--- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38349+++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38350@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38351
38352 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38353 struct dentry *next;
38354+ char d_name[sizeof(next->d_iname)];
38355+ const unsigned char *name;
38356+
38357 next = list_entry(p, struct dentry, d_u.d_child);
38358 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38359 if (!simple_positive(next)) {
38360@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38361
38362 spin_unlock(&next->d_lock);
38363 spin_unlock(&dentry->d_lock);
38364- if (filldir(dirent, next->d_name.name,
38365+ name = next->d_name.name;
38366+ if (name == next->d_iname) {
38367+ memcpy(d_name, name, next->d_name.len);
38368+ name = d_name;
38369+ }
38370+ if (filldir(dirent, name,
38371 next->d_name.len, filp->f_pos,
38372 next->d_inode->i_ino,
38373 dt_type(next->d_inode)) < 0)
38374diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38375--- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38376+++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38377@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38378 /*
38379 * Cookie counter for NLM requests
38380 */
38381-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38382+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38383
38384 void nlmclnt_next_cookie(struct nlm_cookie *c)
38385 {
38386- u32 cookie = atomic_inc_return(&nlm_cookie);
38387+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38388
38389 memcpy(c->data, &cookie, 4);
38390 c->len=4;
38391@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38392 struct nlm_rqst reqst, *req;
38393 int status;
38394
38395+ pax_track_stack();
38396+
38397 req = &reqst;
38398 memset(req, 0, sizeof(*req));
38399 locks_init_lock(&req->a_args.lock.fl);
38400diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38401--- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38402+++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38403@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38404 return;
38405
38406 if (filp->f_op && filp->f_op->flock) {
38407- struct file_lock fl = {
38408+ struct file_lock flock = {
38409 .fl_pid = current->tgid,
38410 .fl_file = filp,
38411 .fl_flags = FL_FLOCK,
38412 .fl_type = F_UNLCK,
38413 .fl_end = OFFSET_MAX,
38414 };
38415- filp->f_op->flock(filp, F_SETLKW, &fl);
38416- if (fl.fl_ops && fl.fl_ops->fl_release_private)
38417- fl.fl_ops->fl_release_private(&fl);
38418+ filp->f_op->flock(filp, F_SETLKW, &flock);
38419+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
38420+ flock.fl_ops->fl_release_private(&flock);
38421 }
38422
38423 lock_flocks();
38424diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38425--- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38426+++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38427@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38428 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38429 int err, valid0, valid1;
38430
38431+ pax_track_stack();
38432+
38433 /* read first superblock */
38434 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38435 if (err)
38436diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38437--- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38438+++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38439@@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38440 return ret;
38441
38442 /*
38443- * Read/write DACs are always overridable.
38444- * Executable DACs are overridable if at least one exec bit is set.
38445+ * Searching includes executable on directories, else just read.
38446 */
38447- if (!(mask & MAY_EXEC) || execute_ok(inode))
38448- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38449+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38450+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38451+#ifdef CONFIG_GRKERNSEC
38452+ if (flags & IPERM_FLAG_RCU)
38453+ return -ECHILD;
38454+#endif
38455+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38456 return 0;
38457+ }
38458
38459 /*
38460- * Searching includes executable on directories, else just read.
38461+ * Read/write DACs are always overridable.
38462+ * Executable DACs are overridable if at least one exec bit is set.
38463 */
38464- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38465- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38466- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38467+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38468+#ifdef CONFIG_GRKERNSEC
38469+ if (flags & IPERM_FLAG_RCU)
38470+ return -ECHILD;
38471+#endif
38472+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38473 return 0;
38474+ }
38475
38476 return -EACCES;
38477 }
38478@@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38479 struct dentry *dentry = nd->path.dentry;
38480 int status;
38481
38482+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38483+ return -ENOENT;
38484+
38485 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38486 return 0;
38487
38488@@ -671,9 +684,16 @@ static inline int exec_permission(struct
38489 if (ret == -ECHILD)
38490 return ret;
38491
38492- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38493- ns_capable(ns, CAP_DAC_READ_SEARCH))
38494+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38495 goto ok;
38496+ else {
38497+#ifdef CONFIG_GRKERNSEC
38498+ if (flags & IPERM_FLAG_RCU)
38499+ return -ECHILD;
38500+#endif
38501+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38502+ goto ok;
38503+ }
38504
38505 return ret;
38506 ok:
38507@@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38508 return error;
38509 }
38510
38511+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
38512+ dentry->d_inode, dentry, nd->path.mnt)) {
38513+ error = -EACCES;
38514+ *p = ERR_PTR(error); /* no ->put_link(), please */
38515+ path_put(&nd->path);
38516+ return error;
38517+ }
38518+
38519 nd->last_type = LAST_BIND;
38520 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38521 error = PTR_ERR(*p);
38522 if (!IS_ERR(*p)) {
38523- char *s = nd_get_link(nd);
38524+ const char *s = nd_get_link(nd);
38525 error = 0;
38526 if (s)
38527 error = __vfs_follow_link(nd, s);
38528@@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38529 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38530
38531 if (likely(!retval)) {
38532+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38533+ return -ENOENT;
38534+
38535 if (unlikely(!audit_dummy_context())) {
38536 if (nd->path.dentry && nd->inode)
38537 audit_inode(name, nd->path.dentry);
38538@@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38539 return error;
38540 }
38541
38542+/*
38543+ * Note that while the flag value (low two bits) for sys_open means:
38544+ * 00 - read-only
38545+ * 01 - write-only
38546+ * 10 - read-write
38547+ * 11 - special
38548+ * it is changed into
38549+ * 00 - no permissions needed
38550+ * 01 - read-permission
38551+ * 10 - write-permission
38552+ * 11 - read-write
38553+ * for the internal routines (ie open_namei()/follow_link() etc)
38554+ * This is more logical, and also allows the 00 "no perm needed"
38555+ * to be used for symlinks (where the permissions are checked
38556+ * later).
38557+ *
38558+*/
38559+static inline int open_to_namei_flags(int flag)
38560+{
38561+ if ((flag+1) & O_ACCMODE)
38562+ flag++;
38563+ return flag;
38564+}
38565+
38566 static int may_open(struct path *path, int acc_mode, int flag)
38567 {
38568 struct dentry *dentry = path->dentry;
38569@@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38570 /*
38571 * Ensure there are no outstanding leases on the file.
38572 */
38573- return break_lease(inode, flag);
38574+ error = break_lease(inode, flag);
38575+
38576+ if (error)
38577+ return error;
38578+
38579+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38580+ error = -EPERM;
38581+ goto exit;
38582+ }
38583+
38584+ if (gr_handle_rawio(inode)) {
38585+ error = -EPERM;
38586+ goto exit;
38587+ }
38588+
38589+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38590+ error = -EACCES;
38591+ goto exit;
38592+ }
38593+exit:
38594+ return error;
38595 }
38596
38597 static int handle_truncate(struct file *filp)
38598@@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38599 }
38600
38601 /*
38602- * Note that while the flag value (low two bits) for sys_open means:
38603- * 00 - read-only
38604- * 01 - write-only
38605- * 10 - read-write
38606- * 11 - special
38607- * it is changed into
38608- * 00 - no permissions needed
38609- * 01 - read-permission
38610- * 10 - write-permission
38611- * 11 - read-write
38612- * for the internal routines (ie open_namei()/follow_link() etc)
38613- * This is more logical, and also allows the 00 "no perm needed"
38614- * to be used for symlinks (where the permissions are checked
38615- * later).
38616- *
38617-*/
38618-static inline int open_to_namei_flags(int flag)
38619-{
38620- if ((flag+1) & O_ACCMODE)
38621- flag++;
38622- return flag;
38623-}
38624-
38625-/*
38626 * Handle the last step of open()
38627 */
38628 static struct file *do_last(struct nameidata *nd, struct path *path,
38629@@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38630 struct dentry *dir = nd->path.dentry;
38631 struct dentry *dentry;
38632 int open_flag = op->open_flag;
38633+ int flag = open_to_namei_flags(open_flag);
38634 int will_truncate = open_flag & O_TRUNC;
38635 int want_write = 0;
38636 int acc_mode = op->acc_mode;
38637@@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38638 /* Negative dentry, just create the file */
38639 if (!dentry->d_inode) {
38640 int mode = op->mode;
38641+
38642+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38643+ error = -EACCES;
38644+ goto exit_mutex_unlock;
38645+ }
38646+
38647 if (!IS_POSIXACL(dir->d_inode))
38648 mode &= ~current_umask();
38649 /*
38650@@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38651 error = vfs_create(dir->d_inode, dentry, mode, nd);
38652 if (error)
38653 goto exit_mutex_unlock;
38654+ else
38655+ gr_handle_create(path->dentry, path->mnt);
38656 mutex_unlock(&dir->d_inode->i_mutex);
38657 dput(nd->path.dentry);
38658 nd->path.dentry = dentry;
38659@@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38660 /*
38661 * It already exists.
38662 */
38663+
38664+ /* only check if O_CREAT is specified, all other checks need to go
38665+ into may_open */
38666+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38667+ error = -EACCES;
38668+ goto exit_mutex_unlock;
38669+ }
38670+
38671 mutex_unlock(&dir->d_inode->i_mutex);
38672 audit_inode(pathname, path->dentry);
38673
38674@@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38675 error = may_mknod(mode);
38676 if (error)
38677 goto out_dput;
38678+
38679+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38680+ error = -EPERM;
38681+ goto out_dput;
38682+ }
38683+
38684+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38685+ error = -EACCES;
38686+ goto out_dput;
38687+ }
38688+
38689 error = mnt_want_write(nd.path.mnt);
38690 if (error)
38691 goto out_dput;
38692@@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38693 }
38694 out_drop_write:
38695 mnt_drop_write(nd.path.mnt);
38696+
38697+ if (!error)
38698+ gr_handle_create(dentry, nd.path.mnt);
38699 out_dput:
38700 dput(dentry);
38701 out_unlock:
38702@@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38703 if (IS_ERR(dentry))
38704 goto out_unlock;
38705
38706+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38707+ error = -EACCES;
38708+ goto out_dput;
38709+ }
38710+
38711 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38712 mode &= ~current_umask();
38713 error = mnt_want_write(nd.path.mnt);
38714@@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38715 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38716 out_drop_write:
38717 mnt_drop_write(nd.path.mnt);
38718+
38719+ if (!error)
38720+ gr_handle_create(dentry, nd.path.mnt);
38721+
38722 out_dput:
38723 dput(dentry);
38724 out_unlock:
38725@@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38726 char * name;
38727 struct dentry *dentry;
38728 struct nameidata nd;
38729+ ino_t saved_ino = 0;
38730+ dev_t saved_dev = 0;
38731
38732 error = user_path_parent(dfd, pathname, &nd, &name);
38733 if (error)
38734@@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38735 error = PTR_ERR(dentry);
38736 if (IS_ERR(dentry))
38737 goto exit2;
38738+
38739+ if (dentry->d_inode != NULL) {
38740+ if (dentry->d_inode->i_nlink <= 1) {
38741+ saved_ino = dentry->d_inode->i_ino;
38742+ saved_dev = gr_get_dev_from_dentry(dentry);
38743+ }
38744+
38745+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38746+ error = -EACCES;
38747+ goto exit3;
38748+ }
38749+ }
38750+
38751 error = mnt_want_write(nd.path.mnt);
38752 if (error)
38753 goto exit3;
38754@@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38755 if (error)
38756 goto exit4;
38757 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38758+ if (!error && (saved_dev || saved_ino))
38759+ gr_handle_delete(saved_ino, saved_dev);
38760 exit4:
38761 mnt_drop_write(nd.path.mnt);
38762 exit3:
38763@@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38764 struct dentry *dentry;
38765 struct nameidata nd;
38766 struct inode *inode = NULL;
38767+ ino_t saved_ino = 0;
38768+ dev_t saved_dev = 0;
38769
38770 error = user_path_parent(dfd, pathname, &nd, &name);
38771 if (error)
38772@@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38773 if (nd.last.name[nd.last.len])
38774 goto slashes;
38775 inode = dentry->d_inode;
38776- if (inode)
38777+ if (inode) {
38778 ihold(inode);
38779+ if (inode->i_nlink <= 1) {
38780+ saved_ino = inode->i_ino;
38781+ saved_dev = gr_get_dev_from_dentry(dentry);
38782+ }
38783+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38784+ error = -EACCES;
38785+ goto exit2;
38786+ }
38787+ }
38788 error = mnt_want_write(nd.path.mnt);
38789 if (error)
38790 goto exit2;
38791@@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38792 if (error)
38793 goto exit3;
38794 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38795+ if (!error && (saved_ino || saved_dev))
38796+ gr_handle_delete(saved_ino, saved_dev);
38797 exit3:
38798 mnt_drop_write(nd.path.mnt);
38799 exit2:
38800@@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38801 if (IS_ERR(dentry))
38802 goto out_unlock;
38803
38804+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38805+ error = -EACCES;
38806+ goto out_dput;
38807+ }
38808+
38809 error = mnt_want_write(nd.path.mnt);
38810 if (error)
38811 goto out_dput;
38812@@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38813 if (error)
38814 goto out_drop_write;
38815 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38816+ if (!error)
38817+ gr_handle_create(dentry, nd.path.mnt);
38818 out_drop_write:
38819 mnt_drop_write(nd.path.mnt);
38820 out_dput:
38821@@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38822 error = PTR_ERR(new_dentry);
38823 if (IS_ERR(new_dentry))
38824 goto out_unlock;
38825+
38826+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38827+ old_path.dentry->d_inode,
38828+ old_path.dentry->d_inode->i_mode, to)) {
38829+ error = -EACCES;
38830+ goto out_dput;
38831+ }
38832+
38833+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38834+ old_path.dentry, old_path.mnt, to)) {
38835+ error = -EACCES;
38836+ goto out_dput;
38837+ }
38838+
38839 error = mnt_want_write(nd.path.mnt);
38840 if (error)
38841 goto out_dput;
38842@@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38843 if (error)
38844 goto out_drop_write;
38845 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38846+ if (!error)
38847+ gr_handle_create(new_dentry, nd.path.mnt);
38848 out_drop_write:
38849 mnt_drop_write(nd.path.mnt);
38850 out_dput:
38851@@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38852 char *to;
38853 int error;
38854
38855+ pax_track_stack();
38856+
38857 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38858 if (error)
38859 goto exit;
38860@@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38861 if (new_dentry == trap)
38862 goto exit5;
38863
38864+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38865+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
38866+ to);
38867+ if (error)
38868+ goto exit5;
38869+
38870 error = mnt_want_write(oldnd.path.mnt);
38871 if (error)
38872 goto exit5;
38873@@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38874 goto exit6;
38875 error = vfs_rename(old_dir->d_inode, old_dentry,
38876 new_dir->d_inode, new_dentry);
38877+ if (!error)
38878+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38879+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38880 exit6:
38881 mnt_drop_write(oldnd.path.mnt);
38882 exit5:
38883@@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38884
38885 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38886 {
38887+ char tmpbuf[64];
38888+ const char *newlink;
38889 int len;
38890
38891 len = PTR_ERR(link);
38892@@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38893 len = strlen(link);
38894 if (len > (unsigned) buflen)
38895 len = buflen;
38896- if (copy_to_user(buffer, link, len))
38897+
38898+ if (len < sizeof(tmpbuf)) {
38899+ memcpy(tmpbuf, link, len);
38900+ newlink = tmpbuf;
38901+ } else
38902+ newlink = link;
38903+
38904+ if (copy_to_user(buffer, newlink, len))
38905 len = -EFAULT;
38906 out:
38907 return len;
38908diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38909--- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38910+++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38911@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38912 if (!(sb->s_flags & MS_RDONLY))
38913 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38914 up_write(&sb->s_umount);
38915+
38916+ gr_log_remount(mnt->mnt_devname, retval);
38917+
38918 return retval;
38919 }
38920
38921@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38922 br_write_unlock(vfsmount_lock);
38923 up_write(&namespace_sem);
38924 release_mounts(&umount_list);
38925+
38926+ gr_log_unmount(mnt->mnt_devname, retval);
38927+
38928 return retval;
38929 }
38930
38931@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38932 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38933 MS_STRICTATIME);
38934
38935+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38936+ retval = -EPERM;
38937+ goto dput_out;
38938+ }
38939+
38940+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38941+ retval = -EPERM;
38942+ goto dput_out;
38943+ }
38944+
38945 if (flags & MS_REMOUNT)
38946 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38947 data_page);
38948@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38949 dev_name, data_page);
38950 dput_out:
38951 path_put(&path);
38952+
38953+ gr_log_mount(dev_name, dir_name, retval);
38954+
38955 return retval;
38956 }
38957
38958@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38959 if (error)
38960 goto out2;
38961
38962+ if (gr_handle_chroot_pivot()) {
38963+ error = -EPERM;
38964+ goto out2;
38965+ }
38966+
38967 get_fs_root(current->fs, &root);
38968 error = lock_mount(&old);
38969 if (error)
38970diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38971--- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38972+++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38973@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38974 int res, val = 0, len;
38975 __u8 __name[NCP_MAXPATHLEN + 1];
38976
38977+ pax_track_stack();
38978+
38979 if (dentry == dentry->d_sb->s_root)
38980 return 1;
38981
38982@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38983 int error, res, len;
38984 __u8 __name[NCP_MAXPATHLEN + 1];
38985
38986+ pax_track_stack();
38987+
38988 error = -EIO;
38989 if (!ncp_conn_valid(server))
38990 goto finished;
38991@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38992 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38993 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38994
38995+ pax_track_stack();
38996+
38997 ncp_age_dentry(server, dentry);
38998 len = sizeof(__name);
38999 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
39000@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
39001 int error, len;
39002 __u8 __name[NCP_MAXPATHLEN + 1];
39003
39004+ pax_track_stack();
39005+
39006 DPRINTK("ncp_mkdir: making %s/%s\n",
39007 dentry->d_parent->d_name.name, dentry->d_name.name);
39008
39009@@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
39010 int old_len, new_len;
39011 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
39012
39013+ pax_track_stack();
39014+
39015 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
39016 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
39017 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
39018diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
39019--- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
39020+++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39021@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
39022 #endif
39023 struct ncp_entry_info finfo;
39024
39025+ pax_track_stack();
39026+
39027 data.wdog_pid = NULL;
39028 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
39029 if (!server)
39030diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
39031--- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
39032+++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39033@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
39034 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
39035 nfsi->attrtimeo_timestamp = jiffies;
39036
39037- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
39038+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
39039 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
39040 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
39041 else
39042@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
39043 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
39044 }
39045
39046-static atomic_long_t nfs_attr_generation_counter;
39047+static atomic_long_unchecked_t nfs_attr_generation_counter;
39048
39049 static unsigned long nfs_read_attr_generation_counter(void)
39050 {
39051- return atomic_long_read(&nfs_attr_generation_counter);
39052+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
39053 }
39054
39055 unsigned long nfs_inc_attr_generation_counter(void)
39056 {
39057- return atomic_long_inc_return(&nfs_attr_generation_counter);
39058+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
39059 }
39060
39061 void nfs_fattr_init(struct nfs_fattr *fattr)
39062diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
39063--- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
39064+++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
39065@@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
39066 unsigned int strhashval;
39067 int err;
39068
39069+ pax_track_stack();
39070+
39071 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
39072 (long long) lock->lk_offset,
39073 (long long) lock->lk_length);
39074diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
39075--- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
39076+++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
39077@@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
39078 .dentry = dentry,
39079 };
39080
39081+ pax_track_stack();
39082+
39083 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
39084 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39085 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39086diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
39087--- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
39088+++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
39089@@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39090 } else {
39091 oldfs = get_fs();
39092 set_fs(KERNEL_DS);
39093- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39094+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39095 set_fs(oldfs);
39096 }
39097
39098@@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39099
39100 /* Write the data. */
39101 oldfs = get_fs(); set_fs(KERNEL_DS);
39102- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39103+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39104 set_fs(oldfs);
39105 if (host_err < 0)
39106 goto out_nfserr;
39107@@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39108 */
39109
39110 oldfs = get_fs(); set_fs(KERNEL_DS);
39111- host_err = inode->i_op->readlink(dentry, buf, *lenp);
39112+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39113 set_fs(oldfs);
39114
39115 if (host_err < 0)
39116diff -urNp linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c
39117--- linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-05-19 00:06:34.000000000 -0400
39118+++ linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-08-14 11:28:46.000000000 -0400
39119@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39120 goto out_close_fd;
39121
39122 ret = -EFAULT;
39123- if (copy_to_user(buf, &fanotify_event_metadata,
39124+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39125+ copy_to_user(buf, &fanotify_event_metadata,
39126 fanotify_event_metadata.event_len))
39127 goto out_kill_access_response;
39128
39129diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
39130--- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
39131+++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
39132@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39133 * get set to 0 so it will never get 'freed'
39134 */
39135 static struct fsnotify_event *q_overflow_event;
39136-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39137+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39138
39139 /**
39140 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39141@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39142 */
39143 u32 fsnotify_get_cookie(void)
39144 {
39145- return atomic_inc_return(&fsnotify_sync_cookie);
39146+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39147 }
39148 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39149
39150diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39151--- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39152+++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39153@@ -1329,7 +1329,7 @@ find_next_index_buffer:
39154 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39155 ~(s64)(ndir->itype.index.block_size - 1)));
39156 /* Bounds checks. */
39157- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39158+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39159 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39160 "inode 0x%lx or driver bug.", vdir->i_ino);
39161 goto err_out;
39162diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39163--- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39164+++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39165@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39166 #endif /* NTFS_RW */
39167 };
39168
39169-const struct file_operations ntfs_empty_file_ops = {};
39170+const struct file_operations ntfs_empty_file_ops __read_only;
39171
39172-const struct inode_operations ntfs_empty_inode_ops = {};
39173+const struct inode_operations ntfs_empty_inode_ops __read_only;
39174diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39175--- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39176+++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39177@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39178 goto bail;
39179 }
39180
39181- atomic_inc(&osb->alloc_stats.moves);
39182+ atomic_inc_unchecked(&osb->alloc_stats.moves);
39183
39184 bail:
39185 if (handle)
39186diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39187--- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39188+++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39189@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39190 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39191 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39192
39193+ pax_track_stack();
39194+
39195 /* At some point it might be nice to break this function up a
39196 * bit. */
39197
39198diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39199--- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39200+++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39201@@ -235,11 +235,11 @@ enum ocfs2_vol_state
39202
39203 struct ocfs2_alloc_stats
39204 {
39205- atomic_t moves;
39206- atomic_t local_data;
39207- atomic_t bitmap_data;
39208- atomic_t bg_allocs;
39209- atomic_t bg_extends;
39210+ atomic_unchecked_t moves;
39211+ atomic_unchecked_t local_data;
39212+ atomic_unchecked_t bitmap_data;
39213+ atomic_unchecked_t bg_allocs;
39214+ atomic_unchecked_t bg_extends;
39215 };
39216
39217 enum ocfs2_local_alloc_state
39218diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39219--- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39220+++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39221@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39222 mlog_errno(status);
39223 goto bail;
39224 }
39225- atomic_inc(&osb->alloc_stats.bg_extends);
39226+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39227
39228 /* You should never ask for this much metadata */
39229 BUG_ON(bits_wanted >
39230@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39231 mlog_errno(status);
39232 goto bail;
39233 }
39234- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39235+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39236
39237 *suballoc_loc = res.sr_bg_blkno;
39238 *suballoc_bit_start = res.sr_bit_offset;
39239@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39240 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39241 res->sr_bits);
39242
39243- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39244+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39245
39246 BUG_ON(res->sr_bits != 1);
39247
39248@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39249 mlog_errno(status);
39250 goto bail;
39251 }
39252- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39253+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39254
39255 BUG_ON(res.sr_bits != 1);
39256
39257@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39258 cluster_start,
39259 num_clusters);
39260 if (!status)
39261- atomic_inc(&osb->alloc_stats.local_data);
39262+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
39263 } else {
39264 if (min_clusters > (osb->bitmap_cpg - 1)) {
39265 /* The only paths asking for contiguousness
39266@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39267 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39268 res.sr_bg_blkno,
39269 res.sr_bit_offset);
39270- atomic_inc(&osb->alloc_stats.bitmap_data);
39271+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39272 *num_clusters = res.sr_bits;
39273 }
39274 }
39275diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39276--- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39277+++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39278@@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39279 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39280 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39281 "Stats",
39282- atomic_read(&osb->alloc_stats.bitmap_data),
39283- atomic_read(&osb->alloc_stats.local_data),
39284- atomic_read(&osb->alloc_stats.bg_allocs),
39285- atomic_read(&osb->alloc_stats.moves),
39286- atomic_read(&osb->alloc_stats.bg_extends));
39287+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39288+ atomic_read_unchecked(&osb->alloc_stats.local_data),
39289+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39290+ atomic_read_unchecked(&osb->alloc_stats.moves),
39291+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39292
39293 out += snprintf(buf + out, len - out,
39294 "%10s => State: %u Descriptor: %llu Size: %u bits "
39295@@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39296 spin_lock_init(&osb->osb_xattr_lock);
39297 ocfs2_init_steal_slots(osb);
39298
39299- atomic_set(&osb->alloc_stats.moves, 0);
39300- atomic_set(&osb->alloc_stats.local_data, 0);
39301- atomic_set(&osb->alloc_stats.bitmap_data, 0);
39302- atomic_set(&osb->alloc_stats.bg_allocs, 0);
39303- atomic_set(&osb->alloc_stats.bg_extends, 0);
39304+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39305+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39306+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39307+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39308+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39309
39310 /* Copy the blockcheck stats from the superblock probe */
39311 osb->osb_ecc_stats = *stats;
39312diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39313--- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39314+++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39315@@ -142,7 +142,7 @@ bail:
39316
39317 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39318 {
39319- char *link = nd_get_link(nd);
39320+ const char *link = nd_get_link(nd);
39321 if (!IS_ERR(link))
39322 kfree(link);
39323 }
39324diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39325--- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39326+++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39327@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39328 error = locks_verify_truncate(inode, NULL, length);
39329 if (!error)
39330 error = security_path_truncate(&path);
39331+
39332+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39333+ error = -EACCES;
39334+
39335 if (!error)
39336 error = do_truncate(path.dentry, length, 0, NULL);
39337
39338@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39339 if (__mnt_is_readonly(path.mnt))
39340 res = -EROFS;
39341
39342+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39343+ res = -EACCES;
39344+
39345 out_path_release:
39346 path_put(&path);
39347 out:
39348@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39349 if (error)
39350 goto dput_and_out;
39351
39352+ gr_log_chdir(path.dentry, path.mnt);
39353+
39354 set_fs_pwd(current->fs, &path);
39355
39356 dput_and_out:
39357@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39358 goto out_putf;
39359
39360 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39361+
39362+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39363+ error = -EPERM;
39364+
39365+ if (!error)
39366+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39367+
39368 if (!error)
39369 set_fs_pwd(current->fs, &file->f_path);
39370 out_putf:
39371@@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39372 if (error)
39373 goto dput_and_out;
39374
39375+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39376+ goto dput_and_out;
39377+
39378+ if (gr_handle_chroot_caps(&path)) {
39379+ error = -ENOMEM;
39380+ goto dput_and_out;
39381+ }
39382+
39383 set_fs_root(current->fs, &path);
39384+
39385+ gr_handle_chroot_chdir(&path);
39386+
39387 error = 0;
39388 dput_and_out:
39389 path_put(&path);
39390@@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39391 err = mnt_want_write_file(file);
39392 if (err)
39393 goto out_putf;
39394+
39395 mutex_lock(&inode->i_mutex);
39396+
39397+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39398+ err = -EACCES;
39399+ goto out_unlock;
39400+ }
39401+
39402 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39403 if (err)
39404 goto out_unlock;
39405 if (mode == (mode_t) -1)
39406 mode = inode->i_mode;
39407+
39408+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39409+ err = -EACCES;
39410+ goto out_unlock;
39411+ }
39412+
39413 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39414 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39415 err = notify_change(dentry, &newattrs);
39416@@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39417 error = mnt_want_write(path.mnt);
39418 if (error)
39419 goto dput_and_out;
39420+
39421 mutex_lock(&inode->i_mutex);
39422+
39423+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39424+ error = -EACCES;
39425+ goto out_unlock;
39426+ }
39427+
39428 error = security_path_chmod(path.dentry, path.mnt, mode);
39429 if (error)
39430 goto out_unlock;
39431 if (mode == (mode_t) -1)
39432 mode = inode->i_mode;
39433+
39434+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39435+ error = -EACCES;
39436+ goto out_unlock;
39437+ }
39438+
39439 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39440 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39441 error = notify_change(path.dentry, &newattrs);
39442@@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39443 int error;
39444 struct iattr newattrs;
39445
39446+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
39447+ return -EACCES;
39448+
39449 newattrs.ia_valid = ATTR_CTIME;
39450 if (user != (uid_t) -1) {
39451 newattrs.ia_valid |= ATTR_UID;
39452@@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39453 if (!IS_ERR(tmp)) {
39454 fd = get_unused_fd_flags(flags);
39455 if (fd >= 0) {
39456- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39457+ struct file *f;
39458+ /* don't allow to be set by userland */
39459+ flags &= ~FMODE_GREXEC;
39460+ f = do_filp_open(dfd, tmp, &op, lookup);
39461 if (IS_ERR(f)) {
39462 put_unused_fd(fd);
39463 fd = PTR_ERR(f);
39464diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39465--- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39466+++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39467@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39468 ldm_error ("A VBLK claims to have %d parts.", num);
39469 return false;
39470 }
39471+
39472 if (rec >= num) {
39473 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39474 return false;
39475@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39476 goto found;
39477 }
39478
39479- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39480+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39481 if (!f) {
39482 ldm_crit ("Out of memory.");
39483 return false;
39484diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39485--- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39486+++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39487@@ -420,9 +420,9 @@ redo:
39488 }
39489 if (bufs) /* More to do? */
39490 continue;
39491- if (!pipe->writers)
39492+ if (!atomic_read(&pipe->writers))
39493 break;
39494- if (!pipe->waiting_writers) {
39495+ if (!atomic_read(&pipe->waiting_writers)) {
39496 /* syscall merging: Usually we must not sleep
39497 * if O_NONBLOCK is set, or if we got some data.
39498 * But if a writer sleeps in kernel space, then
39499@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39500 mutex_lock(&inode->i_mutex);
39501 pipe = inode->i_pipe;
39502
39503- if (!pipe->readers) {
39504+ if (!atomic_read(&pipe->readers)) {
39505 send_sig(SIGPIPE, current, 0);
39506 ret = -EPIPE;
39507 goto out;
39508@@ -530,7 +530,7 @@ redo1:
39509 for (;;) {
39510 int bufs;
39511
39512- if (!pipe->readers) {
39513+ if (!atomic_read(&pipe->readers)) {
39514 send_sig(SIGPIPE, current, 0);
39515 if (!ret)
39516 ret = -EPIPE;
39517@@ -616,9 +616,9 @@ redo2:
39518 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39519 do_wakeup = 0;
39520 }
39521- pipe->waiting_writers++;
39522+ atomic_inc(&pipe->waiting_writers);
39523 pipe_wait(pipe);
39524- pipe->waiting_writers--;
39525+ atomic_dec(&pipe->waiting_writers);
39526 }
39527 out:
39528 mutex_unlock(&inode->i_mutex);
39529@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39530 mask = 0;
39531 if (filp->f_mode & FMODE_READ) {
39532 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39533- if (!pipe->writers && filp->f_version != pipe->w_counter)
39534+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39535 mask |= POLLHUP;
39536 }
39537
39538@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39539 * Most Unices do not set POLLERR for FIFOs but on Linux they
39540 * behave exactly like pipes for poll().
39541 */
39542- if (!pipe->readers)
39543+ if (!atomic_read(&pipe->readers))
39544 mask |= POLLERR;
39545 }
39546
39547@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39548
39549 mutex_lock(&inode->i_mutex);
39550 pipe = inode->i_pipe;
39551- pipe->readers -= decr;
39552- pipe->writers -= decw;
39553+ atomic_sub(decr, &pipe->readers);
39554+ atomic_sub(decw, &pipe->writers);
39555
39556- if (!pipe->readers && !pipe->writers) {
39557+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39558 free_pipe_info(inode);
39559 } else {
39560 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39561@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39562
39563 if (inode->i_pipe) {
39564 ret = 0;
39565- inode->i_pipe->readers++;
39566+ atomic_inc(&inode->i_pipe->readers);
39567 }
39568
39569 mutex_unlock(&inode->i_mutex);
39570@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39571
39572 if (inode->i_pipe) {
39573 ret = 0;
39574- inode->i_pipe->writers++;
39575+ atomic_inc(&inode->i_pipe->writers);
39576 }
39577
39578 mutex_unlock(&inode->i_mutex);
39579@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39580 if (inode->i_pipe) {
39581 ret = 0;
39582 if (filp->f_mode & FMODE_READ)
39583- inode->i_pipe->readers++;
39584+ atomic_inc(&inode->i_pipe->readers);
39585 if (filp->f_mode & FMODE_WRITE)
39586- inode->i_pipe->writers++;
39587+ atomic_inc(&inode->i_pipe->writers);
39588 }
39589
39590 mutex_unlock(&inode->i_mutex);
39591@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39592 inode->i_pipe = NULL;
39593 }
39594
39595-static struct vfsmount *pipe_mnt __read_mostly;
39596+struct vfsmount *pipe_mnt __read_mostly;
39597
39598 /*
39599 * pipefs_dname() is called from d_path().
39600@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39601 goto fail_iput;
39602 inode->i_pipe = pipe;
39603
39604- pipe->readers = pipe->writers = 1;
39605+ atomic_set(&pipe->readers, 1);
39606+ atomic_set(&pipe->writers, 1);
39607 inode->i_fop = &rdwr_pipefifo_fops;
39608
39609 /*
39610diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39611--- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39612+++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39613@@ -60,6 +60,7 @@
39614 #include <linux/tty.h>
39615 #include <linux/string.h>
39616 #include <linux/mman.h>
39617+#include <linux/grsecurity.h>
39618 #include <linux/proc_fs.h>
39619 #include <linux/ioport.h>
39620 #include <linux/uaccess.h>
39621@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39622 seq_putc(m, '\n');
39623 }
39624
39625+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39626+static inline void task_pax(struct seq_file *m, struct task_struct *p)
39627+{
39628+ if (p->mm)
39629+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39630+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39631+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39632+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39633+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39634+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39635+ else
39636+ seq_printf(m, "PaX:\t-----\n");
39637+}
39638+#endif
39639+
39640 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39641 struct pid *pid, struct task_struct *task)
39642 {
39643@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39644 task_cpus_allowed(m, task);
39645 cpuset_task_status_allowed(m, task);
39646 task_context_switch_counts(m, task);
39647+
39648+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39649+ task_pax(m, task);
39650+#endif
39651+
39652+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39653+ task_grsec_rbac(m, task);
39654+#endif
39655+
39656 return 0;
39657 }
39658
39659+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39660+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39661+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39662+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39663+#endif
39664+
39665 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39666 struct pid *pid, struct task_struct *task, int whole)
39667 {
39668@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39669 cputime_t cutime, cstime, utime, stime;
39670 cputime_t cgtime, gtime;
39671 unsigned long rsslim = 0;
39672- char tcomm[sizeof(task->comm)];
39673+ char tcomm[sizeof(task->comm)] = { 0 };
39674 unsigned long flags;
39675
39676+ pax_track_stack();
39677+
39678 state = *get_task_state(task);
39679 vsize = eip = esp = 0;
39680 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39681@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39682 gtime = task->gtime;
39683 }
39684
39685+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39686+ if (PAX_RAND_FLAGS(mm)) {
39687+ eip = 0;
39688+ esp = 0;
39689+ wchan = 0;
39690+ }
39691+#endif
39692+#ifdef CONFIG_GRKERNSEC_HIDESYM
39693+ wchan = 0;
39694+ eip =0;
39695+ esp =0;
39696+#endif
39697+
39698 /* scale priority and nice values from timeslices to -20..20 */
39699 /* to make it look like a "normal" Unix priority/nice value */
39700 priority = task_prio(task);
39701@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39702 vsize,
39703 mm ? get_mm_rss(mm) : 0,
39704 rsslim,
39705+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39706+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39707+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39708+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39709+#else
39710 mm ? (permitted ? mm->start_code : 1) : 0,
39711 mm ? (permitted ? mm->end_code : 1) : 0,
39712 (permitted && mm) ? mm->start_stack : 0,
39713+#endif
39714 esp,
39715 eip,
39716 /* The signal information here is obsolete.
39717@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39718
39719 return 0;
39720 }
39721+
39722+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39723+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39724+{
39725+ u32 curr_ip = 0;
39726+ unsigned long flags;
39727+
39728+ if (lock_task_sighand(task, &flags)) {
39729+ curr_ip = task->signal->curr_ip;
39730+ unlock_task_sighand(task, &flags);
39731+ }
39732+
39733+ return sprintf(buffer, "%pI4\n", &curr_ip);
39734+}
39735+#endif
39736diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39737--- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39738+++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39739@@ -104,6 +104,22 @@ struct pid_entry {
39740 union proc_op op;
39741 };
39742
39743+struct getdents_callback {
39744+ struct linux_dirent __user * current_dir;
39745+ struct linux_dirent __user * previous;
39746+ struct file * file;
39747+ int count;
39748+ int error;
39749+};
39750+
39751+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39752+ loff_t offset, u64 ino, unsigned int d_type)
39753+{
39754+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
39755+ buf->error = -EINVAL;
39756+ return 0;
39757+}
39758+
39759 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39760 .name = (NAME), \
39761 .len = sizeof(NAME) - 1, \
39762@@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39763 if (task == current)
39764 return mm;
39765
39766+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39767+ return ERR_PTR(-EPERM);
39768+
39769 /*
39770 * If current is actively ptrace'ing, and would also be
39771 * permitted to freshly attach with ptrace now, permit it.
39772@@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39773 if (!mm->arg_end)
39774 goto out_mm; /* Shh! No looking before we're done */
39775
39776+ if (gr_acl_handle_procpidmem(task))
39777+ goto out_mm;
39778+
39779 len = mm->arg_end - mm->arg_start;
39780
39781 if (len > PAGE_SIZE)
39782@@ -306,12 +328,28 @@ out:
39783 return res;
39784 }
39785
39786+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39787+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39788+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39789+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39790+#endif
39791+
39792 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39793 {
39794 struct mm_struct *mm = mm_for_maps(task);
39795 int res = PTR_ERR(mm);
39796 if (mm && !IS_ERR(mm)) {
39797 unsigned int nwords = 0;
39798+
39799+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39800+ /* allow if we're currently ptracing this task */
39801+ if (PAX_RAND_FLAGS(mm) &&
39802+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39803+ mmput(mm);
39804+ return res;
39805+ }
39806+#endif
39807+
39808 do {
39809 nwords += 2;
39810 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39811@@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39812 }
39813
39814
39815-#ifdef CONFIG_KALLSYMS
39816+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39817 /*
39818 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39819 * Returns the resolved symbol. If that fails, simply return the address.
39820@@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39821 mutex_unlock(&task->signal->cred_guard_mutex);
39822 }
39823
39824-#ifdef CONFIG_STACKTRACE
39825+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39826
39827 #define MAX_STACK_TRACE_DEPTH 64
39828
39829@@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39830 return count;
39831 }
39832
39833-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39834+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39835 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39836 {
39837 long nr;
39838@@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39839 /************************************************************************/
39840
39841 /* permission checks */
39842-static int proc_fd_access_allowed(struct inode *inode)
39843+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39844 {
39845 struct task_struct *task;
39846 int allowed = 0;
39847@@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39848 */
39849 task = get_proc_task(inode);
39850 if (task) {
39851- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39852+ if (log)
39853+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39854+ else
39855+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39856 put_task_struct(task);
39857 }
39858 return allowed;
39859@@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39860 if (!task)
39861 goto out_no_task;
39862
39863+ if (gr_acl_handle_procpidmem(task))
39864+ goto out;
39865+
39866 ret = -ENOMEM;
39867 page = (char *)__get_free_page(GFP_TEMPORARY);
39868 if (!page)
39869@@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39870 path_put(&nd->path);
39871
39872 /* Are we allowed to snoop on the tasks file descriptors? */
39873- if (!proc_fd_access_allowed(inode))
39874+ if (!proc_fd_access_allowed(inode,0))
39875 goto out;
39876
39877 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39878@@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39879 struct path path;
39880
39881 /* Are we allowed to snoop on the tasks file descriptors? */
39882- if (!proc_fd_access_allowed(inode))
39883- goto out;
39884+ /* logging this is needed for learning on chromium to work properly,
39885+ but we don't want to flood the logs from 'ps' which does a readlink
39886+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39887+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
39888+ */
39889+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39890+ if (!proc_fd_access_allowed(inode,0))
39891+ goto out;
39892+ } else {
39893+ if (!proc_fd_access_allowed(inode,1))
39894+ goto out;
39895+ }
39896
39897 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39898 if (error)
39899@@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39900 rcu_read_lock();
39901 cred = __task_cred(task);
39902 inode->i_uid = cred->euid;
39903+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39904+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39905+#else
39906 inode->i_gid = cred->egid;
39907+#endif
39908 rcu_read_unlock();
39909 }
39910 security_task_to_inode(task, inode);
39911@@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39912 struct inode *inode = dentry->d_inode;
39913 struct task_struct *task;
39914 const struct cred *cred;
39915+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39916+ const struct cred *tmpcred = current_cred();
39917+#endif
39918
39919 generic_fillattr(inode, stat);
39920
39921@@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39922 stat->uid = 0;
39923 stat->gid = 0;
39924 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39925+
39926+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39927+ rcu_read_unlock();
39928+ return -ENOENT;
39929+ }
39930+
39931 if (task) {
39932+ cred = __task_cred(task);
39933+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39934+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39935+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39936+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39937+#endif
39938+ ) {
39939+#endif
39940 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39941+#ifdef CONFIG_GRKERNSEC_PROC_USER
39942+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39943+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39944+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39945+#endif
39946 task_dumpable(task)) {
39947- cred = __task_cred(task);
39948 stat->uid = cred->euid;
39949+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39950+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39951+#else
39952 stat->gid = cred->egid;
39953+#endif
39954 }
39955+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39956+ } else {
39957+ rcu_read_unlock();
39958+ return -ENOENT;
39959+ }
39960+#endif
39961 }
39962 rcu_read_unlock();
39963 return 0;
39964@@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39965
39966 if (task) {
39967 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39968+#ifdef CONFIG_GRKERNSEC_PROC_USER
39969+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39970+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39971+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39972+#endif
39973 task_dumpable(task)) {
39974 rcu_read_lock();
39975 cred = __task_cred(task);
39976 inode->i_uid = cred->euid;
39977+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39978+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39979+#else
39980 inode->i_gid = cred->egid;
39981+#endif
39982 rcu_read_unlock();
39983 } else {
39984 inode->i_uid = 0;
39985@@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
39986 int fd = proc_fd(inode);
39987
39988 if (task) {
39989- files = get_files_struct(task);
39990+ if (!gr_acl_handle_procpidmem(task))
39991+ files = get_files_struct(task);
39992 put_task_struct(task);
39993 }
39994 if (files) {
39995@@ -2219,15 +2318,25 @@ static const struct file_operations proc
39996 */
39997 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39998 {
39999+ struct task_struct *task;
40000 int rv;
40001
40002 if (flags & IPERM_FLAG_RCU)
40003 return -ECHILD;
40004 rv = generic_permission(inode, mask, flags, NULL);
40005- if (rv == 0)
40006- return 0;
40007+
40008 if (task_pid(current) == proc_pid(inode))
40009 rv = 0;
40010+
40011+ task = get_proc_task(inode);
40012+ if (task == NULL)
40013+ return rv;
40014+
40015+ if (gr_acl_handle_procpidmem(task))
40016+ rv = -EACCES;
40017+
40018+ put_task_struct(task);
40019+
40020 return rv;
40021 }
40022
40023@@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
40024 if (!task)
40025 goto out_no_task;
40026
40027+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40028+ goto out;
40029+
40030 /*
40031 * Yes, it does not scale. And it should not. Don't add
40032 * new entries into /proc/<tgid>/ without very good reasons.
40033@@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
40034 if (!task)
40035 goto out_no_task;
40036
40037+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40038+ goto out;
40039+
40040 ret = 0;
40041 i = filp->f_pos;
40042 switch (i) {
40043@@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
40044 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
40045 void *cookie)
40046 {
40047- char *s = nd_get_link(nd);
40048+ const char *s = nd_get_link(nd);
40049 if (!IS_ERR(s))
40050 __putname(s);
40051 }
40052@@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
40053 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
40054 #endif
40055 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40056-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40057+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40058 INF("syscall", S_IRUGO, proc_pid_syscall),
40059 #endif
40060 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40061@@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
40062 #ifdef CONFIG_SECURITY
40063 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40064 #endif
40065-#ifdef CONFIG_KALLSYMS
40066+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40067 INF("wchan", S_IRUGO, proc_pid_wchan),
40068 #endif
40069-#ifdef CONFIG_STACKTRACE
40070+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40071 ONE("stack", S_IRUGO, proc_pid_stack),
40072 #endif
40073 #ifdef CONFIG_SCHEDSTATS
40074@@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
40075 #ifdef CONFIG_TASK_IO_ACCOUNTING
40076 INF("io", S_IRUSR, proc_tgid_io_accounting),
40077 #endif
40078+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40079+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
40080+#endif
40081 };
40082
40083 static int proc_tgid_base_readdir(struct file * filp,
40084@@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
40085 if (!inode)
40086 goto out;
40087
40088+#ifdef CONFIG_GRKERNSEC_PROC_USER
40089+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40090+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40091+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40092+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40093+#else
40094 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40095+#endif
40096 inode->i_op = &proc_tgid_base_inode_operations;
40097 inode->i_fop = &proc_tgid_base_operations;
40098 inode->i_flags|=S_IMMUTABLE;
40099@@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
40100 if (!task)
40101 goto out;
40102
40103+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40104+ goto out_put_task;
40105+
40106 result = proc_pid_instantiate(dir, dentry, task, NULL);
40107+out_put_task:
40108 put_task_struct(task);
40109 out:
40110 return result;
40111@@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
40112 {
40113 unsigned int nr;
40114 struct task_struct *reaper;
40115+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40116+ const struct cred *tmpcred = current_cred();
40117+ const struct cred *itercred;
40118+#endif
40119+ filldir_t __filldir = filldir;
40120 struct tgid_iter iter;
40121 struct pid_namespace *ns;
40122
40123@@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
40124 for (iter = next_tgid(ns, iter);
40125 iter.task;
40126 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40127+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40128+ rcu_read_lock();
40129+ itercred = __task_cred(iter.task);
40130+#endif
40131+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40132+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40133+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40134+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40135+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40136+#endif
40137+ )
40138+#endif
40139+ )
40140+ __filldir = &gr_fake_filldir;
40141+ else
40142+ __filldir = filldir;
40143+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40144+ rcu_read_unlock();
40145+#endif
40146 filp->f_pos = iter.tgid + TGID_OFFSET;
40147- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40148+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40149 put_task_struct(iter.task);
40150 goto out;
40151 }
40152@@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40153 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40154 #endif
40155 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40156-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40157+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40158 INF("syscall", S_IRUGO, proc_pid_syscall),
40159 #endif
40160 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40161@@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40162 #ifdef CONFIG_SECURITY
40163 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40164 #endif
40165-#ifdef CONFIG_KALLSYMS
40166+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40167 INF("wchan", S_IRUGO, proc_pid_wchan),
40168 #endif
40169-#ifdef CONFIG_STACKTRACE
40170+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40171 ONE("stack", S_IRUGO, proc_pid_stack),
40172 #endif
40173 #ifdef CONFIG_SCHEDSTATS
40174diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40175--- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40176+++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40177@@ -23,7 +23,11 @@ static const struct file_operations cmdl
40178
40179 static int __init proc_cmdline_init(void)
40180 {
40181+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40182+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40183+#else
40184 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40185+#endif
40186 return 0;
40187 }
40188 module_init(proc_cmdline_init);
40189diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40190--- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40191+++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40192@@ -64,7 +64,11 @@ static const struct file_operations proc
40193
40194 static int __init proc_devices_init(void)
40195 {
40196+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40197+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40198+#else
40199 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40200+#endif
40201 return 0;
40202 }
40203 module_init(proc_devices_init);
40204diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40205--- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40206+++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40207@@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40208 if (de->mode) {
40209 inode->i_mode = de->mode;
40210 inode->i_uid = de->uid;
40211+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40212+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40213+#else
40214 inode->i_gid = de->gid;
40215+#endif
40216 }
40217 if (de->size)
40218 inode->i_size = de->size;
40219diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40220--- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40221+++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40222@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40223 struct pid *pid, struct task_struct *task);
40224 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40225 struct pid *pid, struct task_struct *task);
40226+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40227+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40228+#endif
40229 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40230
40231 extern const struct file_operations proc_maps_operations;
40232diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40233--- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40234+++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40235@@ -30,12 +30,12 @@ config PROC_FS
40236
40237 config PROC_KCORE
40238 bool "/proc/kcore support" if !ARM
40239- depends on PROC_FS && MMU
40240+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40241
40242 config PROC_VMCORE
40243 bool "/proc/vmcore support"
40244- depends on PROC_FS && CRASH_DUMP
40245- default y
40246+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40247+ default n
40248 help
40249 Exports the dump image of crashed kernel in ELF format.
40250
40251@@ -59,8 +59,8 @@ config PROC_SYSCTL
40252 limited in memory.
40253
40254 config PROC_PAGE_MONITOR
40255- default y
40256- depends on PROC_FS && MMU
40257+ default n
40258+ depends on PROC_FS && MMU && !GRKERNSEC
40259 bool "Enable /proc page monitoring" if EXPERT
40260 help
40261 Various /proc files exist to monitor process memory utilization:
40262diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40263--- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40264+++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40265@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40266 off_t offset = 0;
40267 struct kcore_list *m;
40268
40269+ pax_track_stack();
40270+
40271 /* setup ELF header */
40272 elf = (struct elfhdr *) bufp;
40273 bufp += sizeof(struct elfhdr);
40274@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40275 * the addresses in the elf_phdr on our list.
40276 */
40277 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40278- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40279+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40280+ if (tsz > buflen)
40281 tsz = buflen;
40282-
40283+
40284 while (buflen) {
40285 struct kcore_list *m;
40286
40287@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40288 kfree(elf_buf);
40289 } else {
40290 if (kern_addr_valid(start)) {
40291- unsigned long n;
40292+ char *elf_buf;
40293+ mm_segment_t oldfs;
40294
40295- n = copy_to_user(buffer, (char *)start, tsz);
40296- /*
40297- * We cannot distingush between fault on source
40298- * and fault on destination. When this happens
40299- * we clear too and hope it will trigger the
40300- * EFAULT again.
40301- */
40302- if (n) {
40303- if (clear_user(buffer + tsz - n,
40304- n))
40305+ elf_buf = kmalloc(tsz, GFP_KERNEL);
40306+ if (!elf_buf)
40307+ return -ENOMEM;
40308+ oldfs = get_fs();
40309+ set_fs(KERNEL_DS);
40310+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40311+ set_fs(oldfs);
40312+ if (copy_to_user(buffer, elf_buf, tsz)) {
40313+ kfree(elf_buf);
40314 return -EFAULT;
40315+ }
40316 }
40317+ set_fs(oldfs);
40318+ kfree(elf_buf);
40319 } else {
40320 if (clear_user(buffer, tsz))
40321 return -EFAULT;
40322@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40323
40324 static int open_kcore(struct inode *inode, struct file *filp)
40325 {
40326+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40327+ return -EPERM;
40328+#endif
40329 if (!capable(CAP_SYS_RAWIO))
40330 return -EPERM;
40331 if (kcore_need_update)
40332diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40333--- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40334+++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40335@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40336 unsigned long pages[NR_LRU_LISTS];
40337 int lru;
40338
40339+ pax_track_stack();
40340+
40341 /*
40342 * display in kilobytes.
40343 */
40344@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40345 vmi.used >> 10,
40346 vmi.largest_chunk >> 10
40347 #ifdef CONFIG_MEMORY_FAILURE
40348- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40349+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40350 #endif
40351 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40352 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40353diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40354--- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40355+++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40356@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40357 if (len < 1)
40358 len = 1;
40359 seq_printf(m, "%*c", len, ' ');
40360- seq_path(m, &file->f_path, "");
40361+ seq_path(m, &file->f_path, "\n\\");
40362 }
40363
40364 seq_putc(m, '\n');
40365diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40366--- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40367+++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40368@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40369 struct task_struct *task;
40370 struct nsproxy *ns;
40371 struct net *net = NULL;
40372+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40373+ const struct cred *cred = current_cred();
40374+#endif
40375+
40376+#ifdef CONFIG_GRKERNSEC_PROC_USER
40377+ if (cred->fsuid)
40378+ return net;
40379+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40380+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40381+ return net;
40382+#endif
40383
40384 rcu_read_lock();
40385 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40386diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40387--- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40388+++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40389@@ -8,6 +8,8 @@
40390 #include <linux/namei.h>
40391 #include "internal.h"
40392
40393+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40394+
40395 static const struct dentry_operations proc_sys_dentry_operations;
40396 static const struct file_operations proc_sys_file_operations;
40397 static const struct inode_operations proc_sys_inode_operations;
40398@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40399 if (!p)
40400 goto out;
40401
40402+ if (gr_handle_sysctl(p, MAY_EXEC))
40403+ goto out;
40404+
40405 err = ERR_PTR(-ENOMEM);
40406 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40407 if (h)
40408@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40409 if (*pos < file->f_pos)
40410 continue;
40411
40412+ if (gr_handle_sysctl(table, 0))
40413+ continue;
40414+
40415 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40416 if (res)
40417 return res;
40418@@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40419 if (IS_ERR(head))
40420 return PTR_ERR(head);
40421
40422+ if (table && gr_handle_sysctl(table, MAY_EXEC))
40423+ return -ENOENT;
40424+
40425 generic_fillattr(inode, stat);
40426 if (table)
40427 stat->mode = (stat->mode & S_IFMT) | table->mode;
40428diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40429--- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40430+++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40431@@ -122,7 +122,15 @@ void __init proc_root_init(void)
40432 #ifdef CONFIG_PROC_DEVICETREE
40433 proc_device_tree_init();
40434 #endif
40435+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40436+#ifdef CONFIG_GRKERNSEC_PROC_USER
40437+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40438+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40439+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40440+#endif
40441+#else
40442 proc_mkdir("bus", NULL);
40443+#endif
40444 proc_sys_init();
40445 }
40446
40447diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40448--- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40449+++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40450@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40451 "VmExe:\t%8lu kB\n"
40452 "VmLib:\t%8lu kB\n"
40453 "VmPTE:\t%8lu kB\n"
40454- "VmSwap:\t%8lu kB\n",
40455- hiwater_vm << (PAGE_SHIFT-10),
40456+ "VmSwap:\t%8lu kB\n"
40457+
40458+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40459+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40460+#endif
40461+
40462+ ,hiwater_vm << (PAGE_SHIFT-10),
40463 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40464 mm->locked_vm << (PAGE_SHIFT-10),
40465 hiwater_rss << (PAGE_SHIFT-10),
40466@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40467 data << (PAGE_SHIFT-10),
40468 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40469 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40470- swap << (PAGE_SHIFT-10));
40471+ swap << (PAGE_SHIFT-10)
40472+
40473+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40474+ , mm->context.user_cs_base, mm->context.user_cs_limit
40475+#endif
40476+
40477+ );
40478 }
40479
40480 unsigned long task_vsize(struct mm_struct *mm)
40481@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40482 return ret;
40483 }
40484
40485+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40486+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40487+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
40488+ _mm->pax_flags & MF_PAX_SEGMEXEC))
40489+#endif
40490+
40491 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40492 {
40493 struct mm_struct *mm = vma->vm_mm;
40494@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40495 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40496 }
40497
40498- /* We don't show the stack guard page in /proc/maps */
40499+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40500+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40501+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40502+#else
40503 start = vma->vm_start;
40504- if (stack_guard_page_start(vma, start))
40505- start += PAGE_SIZE;
40506 end = vma->vm_end;
40507- if (stack_guard_page_end(vma, end))
40508- end -= PAGE_SIZE;
40509+#endif
40510
40511 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40512 start,
40513@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40514 flags & VM_WRITE ? 'w' : '-',
40515 flags & VM_EXEC ? 'x' : '-',
40516 flags & VM_MAYSHARE ? 's' : 'p',
40517+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40518+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40519+#else
40520 pgoff,
40521+#endif
40522 MAJOR(dev), MINOR(dev), ino, &len);
40523
40524 /*
40525@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40526 */
40527 if (file) {
40528 pad_len_spaces(m, len);
40529- seq_path(m, &file->f_path, "\n");
40530+ seq_path(m, &file->f_path, "\n\\");
40531 } else {
40532 const char *name = arch_vma_name(vma);
40533 if (!name) {
40534@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40535 if (vma->vm_start <= mm->brk &&
40536 vma->vm_end >= mm->start_brk) {
40537 name = "[heap]";
40538- } else if (vma->vm_start <= mm->start_stack &&
40539- vma->vm_end >= mm->start_stack) {
40540+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40541+ (vma->vm_start <= mm->start_stack &&
40542+ vma->vm_end >= mm->start_stack)) {
40543 name = "[stack]";
40544 }
40545 } else {
40546@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40547 };
40548
40549 memset(&mss, 0, sizeof mss);
40550- mss.vma = vma;
40551- /* mmap_sem is held in m_start */
40552- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40553- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40554-
40555+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40556+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40557+#endif
40558+ mss.vma = vma;
40559+ /* mmap_sem is held in m_start */
40560+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40561+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40562+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40563+ }
40564+#endif
40565 show_map_vma(m, vma);
40566
40567 seq_printf(m,
40568@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40569 "KernelPageSize: %8lu kB\n"
40570 "MMUPageSize: %8lu kB\n"
40571 "Locked: %8lu kB\n",
40572+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40573+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40574+#else
40575 (vma->vm_end - vma->vm_start) >> 10,
40576+#endif
40577 mss.resident >> 10,
40578 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40579 mss.shared_clean >> 10,
40580diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40581--- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40582+++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40583@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40584 else
40585 bytes += kobjsize(mm);
40586
40587- if (current->fs && current->fs->users > 1)
40588+ if (current->fs && atomic_read(&current->fs->users) > 1)
40589 sbytes += kobjsize(current->fs);
40590 else
40591 bytes += kobjsize(current->fs);
40592@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40593
40594 if (file) {
40595 pad_len_spaces(m, len);
40596- seq_path(m, &file->f_path, "");
40597+ seq_path(m, &file->f_path, "\n\\");
40598 } else if (mm) {
40599 if (vma->vm_start <= mm->start_stack &&
40600 vma->vm_end >= mm->start_stack) {
40601diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40602--- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40603+++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40604@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40605 void quota_send_warning(short type, unsigned int id, dev_t dev,
40606 const char warntype)
40607 {
40608- static atomic_t seq;
40609+ static atomic_unchecked_t seq;
40610 struct sk_buff *skb;
40611 void *msg_head;
40612 int ret;
40613@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40614 "VFS: Not enough memory to send quota warning.\n");
40615 return;
40616 }
40617- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40618+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40619 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40620 if (!msg_head) {
40621 printk(KERN_ERR
40622diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40623--- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40624+++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40625@@ -17,6 +17,7 @@
40626 #include <linux/security.h>
40627 #include <linux/syscalls.h>
40628 #include <linux/unistd.h>
40629+#include <linux/namei.h>
40630
40631 #include <asm/uaccess.h>
40632
40633@@ -67,6 +68,7 @@ struct old_linux_dirent {
40634
40635 struct readdir_callback {
40636 struct old_linux_dirent __user * dirent;
40637+ struct file * file;
40638 int result;
40639 };
40640
40641@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40642 buf->result = -EOVERFLOW;
40643 return -EOVERFLOW;
40644 }
40645+
40646+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40647+ return 0;
40648+
40649 buf->result++;
40650 dirent = buf->dirent;
40651 if (!access_ok(VERIFY_WRITE, dirent,
40652@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40653
40654 buf.result = 0;
40655 buf.dirent = dirent;
40656+ buf.file = file;
40657
40658 error = vfs_readdir(file, fillonedir, &buf);
40659 if (buf.result)
40660@@ -142,6 +149,7 @@ struct linux_dirent {
40661 struct getdents_callback {
40662 struct linux_dirent __user * current_dir;
40663 struct linux_dirent __user * previous;
40664+ struct file * file;
40665 int count;
40666 int error;
40667 };
40668@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40669 buf->error = -EOVERFLOW;
40670 return -EOVERFLOW;
40671 }
40672+
40673+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40674+ return 0;
40675+
40676 dirent = buf->previous;
40677 if (dirent) {
40678 if (__put_user(offset, &dirent->d_off))
40679@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40680 buf.previous = NULL;
40681 buf.count = count;
40682 buf.error = 0;
40683+ buf.file = file;
40684
40685 error = vfs_readdir(file, filldir, &buf);
40686 if (error >= 0)
40687@@ -229,6 +242,7 @@ out:
40688 struct getdents_callback64 {
40689 struct linux_dirent64 __user * current_dir;
40690 struct linux_dirent64 __user * previous;
40691+ struct file *file;
40692 int count;
40693 int error;
40694 };
40695@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40696 buf->error = -EINVAL; /* only used if we fail.. */
40697 if (reclen > buf->count)
40698 return -EINVAL;
40699+
40700+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40701+ return 0;
40702+
40703 dirent = buf->previous;
40704 if (dirent) {
40705 if (__put_user(offset, &dirent->d_off))
40706@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40707
40708 buf.current_dir = dirent;
40709 buf.previous = NULL;
40710+ buf.file = file;
40711 buf.count = count;
40712 buf.error = 0;
40713
40714diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40715--- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40716+++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40717@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40718 struct reiserfs_dir_entry de;
40719 int ret = 0;
40720
40721+ pax_track_stack();
40722+
40723 reiserfs_write_lock(inode->i_sb);
40724
40725 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40726diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40727--- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40728+++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40729@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40730 return;
40731 }
40732
40733- atomic_inc(&(fs_generation(tb->tb_sb)));
40734+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40735 do_balance_starts(tb);
40736
40737 /* balance leaf returns 0 except if combining L R and S into
40738diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40739--- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40740+++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40741@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40742 struct buffer_head *bh;
40743 int i, j;
40744
40745+ pax_track_stack();
40746+
40747 bh = __getblk(dev, block, bufsize);
40748 if (buffer_uptodate(bh))
40749 return (bh);
40750diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40751--- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40752+++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40753@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40754 unsigned long savelink = 1;
40755 struct timespec ctime;
40756
40757+ pax_track_stack();
40758+
40759 /* three balancings: (1) old name removal, (2) new name insertion
40760 and (3) maybe "save" link insertion
40761 stat data updates: (1) old directory,
40762diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40763--- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40764+++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40765@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40766 "SMALL_TAILS " : "NO_TAILS ",
40767 replay_only(sb) ? "REPLAY_ONLY " : "",
40768 convert_reiserfs(sb) ? "CONV " : "",
40769- atomic_read(&r->s_generation_counter),
40770+ atomic_read_unchecked(&r->s_generation_counter),
40771 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40772 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40773 SF(s_good_search_by_key_reada), SF(s_bmaps),
40774@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40775 struct journal_params *jp = &rs->s_v1.s_journal;
40776 char b[BDEVNAME_SIZE];
40777
40778+ pax_track_stack();
40779+
40780 seq_printf(m, /* on-disk fields */
40781 "jp_journal_1st_block: \t%i\n"
40782 "jp_journal_dev: \t%s[%x]\n"
40783diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40784--- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40785+++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40786@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40787 int iter = 0;
40788 #endif
40789
40790+ pax_track_stack();
40791+
40792 BUG_ON(!th->t_trans_id);
40793
40794 init_tb_struct(th, &s_del_balance, sb, path,
40795@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40796 int retval;
40797 int quota_cut_bytes = 0;
40798
40799+ pax_track_stack();
40800+
40801 BUG_ON(!th->t_trans_id);
40802
40803 le_key2cpu_key(&cpu_key, key);
40804@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40805 int quota_cut_bytes;
40806 loff_t tail_pos = 0;
40807
40808+ pax_track_stack();
40809+
40810 BUG_ON(!th->t_trans_id);
40811
40812 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40813@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40814 int retval;
40815 int fs_gen;
40816
40817+ pax_track_stack();
40818+
40819 BUG_ON(!th->t_trans_id);
40820
40821 fs_gen = get_generation(inode->i_sb);
40822@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40823 int fs_gen = 0;
40824 int quota_bytes = 0;
40825
40826+ pax_track_stack();
40827+
40828 BUG_ON(!th->t_trans_id);
40829
40830 if (inode) { /* Do we count quotas for item? */
40831diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40832--- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40833+++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40834@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40835 {.option_name = NULL}
40836 };
40837
40838+ pax_track_stack();
40839+
40840 *blocks = 0;
40841 if (!options || !*options)
40842 /* use default configuration: create tails, journaling on, no
40843diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40844--- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40845+++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40846@@ -20,6 +20,7 @@
40847 #include <linux/module.h>
40848 #include <linux/slab.h>
40849 #include <linux/poll.h>
40850+#include <linux/security.h>
40851 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40852 #include <linux/file.h>
40853 #include <linux/fdtable.h>
40854@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40855 int retval, i, timed_out = 0;
40856 unsigned long slack = 0;
40857
40858+ pax_track_stack();
40859+
40860 rcu_read_lock();
40861 retval = max_select_fd(n, fds);
40862 rcu_read_unlock();
40863@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40864 /* Allocate small arguments on the stack to save memory and be faster */
40865 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40866
40867+ pax_track_stack();
40868+
40869 ret = -EINVAL;
40870 if (n < 0)
40871 goto out_nofds;
40872@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40873 struct poll_list *walk = head;
40874 unsigned long todo = nfds;
40875
40876+ pax_track_stack();
40877+
40878+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40879 if (nfds > rlimit(RLIMIT_NOFILE))
40880 return -EINVAL;
40881
40882diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40883--- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40884+++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40885@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40886 return 0;
40887 }
40888 if (!m->buf) {
40889- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40890+ m->size = PAGE_SIZE;
40891+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40892 if (!m->buf)
40893 return -ENOMEM;
40894 }
40895@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40896 Eoverflow:
40897 m->op->stop(m, p);
40898 kfree(m->buf);
40899- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40900+ m->size <<= 1;
40901+ m->buf = kmalloc(m->size, GFP_KERNEL);
40902 return !m->buf ? -ENOMEM : -EAGAIN;
40903 }
40904
40905@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40906 m->version = file->f_version;
40907 /* grab buffer if we didn't have one */
40908 if (!m->buf) {
40909- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40910+ m->size = PAGE_SIZE;
40911+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40912 if (!m->buf)
40913 goto Enomem;
40914 }
40915@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40916 goto Fill;
40917 m->op->stop(m, p);
40918 kfree(m->buf);
40919- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40920+ m->size <<= 1;
40921+ m->buf = kmalloc(m->size, GFP_KERNEL);
40922 if (!m->buf)
40923 goto Enomem;
40924 m->count = 0;
40925@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40926 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40927 void *data)
40928 {
40929- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40930+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40931 int res = -ENOMEM;
40932
40933 if (op) {
40934diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40935--- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40936+++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40937@@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40938 pipe_lock(pipe);
40939
40940 for (;;) {
40941- if (!pipe->readers) {
40942+ if (!atomic_read(&pipe->readers)) {
40943 send_sig(SIGPIPE, current, 0);
40944 if (!ret)
40945 ret = -EPIPE;
40946@@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40947 do_wakeup = 0;
40948 }
40949
40950- pipe->waiting_writers++;
40951+ atomic_inc(&pipe->waiting_writers);
40952 pipe_wait(pipe);
40953- pipe->waiting_writers--;
40954+ atomic_dec(&pipe->waiting_writers);
40955 }
40956
40957 pipe_unlock(pipe);
40958@@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40959 .spd_release = spd_release_page,
40960 };
40961
40962+ pax_track_stack();
40963+
40964 if (splice_grow_spd(pipe, &spd))
40965 return -ENOMEM;
40966
40967@@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40968 old_fs = get_fs();
40969 set_fs(get_ds());
40970 /* The cast to a user pointer is valid due to the set_fs() */
40971- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40972+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40973 set_fs(old_fs);
40974
40975 return res;
40976@@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
40977 old_fs = get_fs();
40978 set_fs(get_ds());
40979 /* The cast to a user pointer is valid due to the set_fs() */
40980- res = vfs_write(file, (const char __user *)buf, count, &pos);
40981+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40982 set_fs(old_fs);
40983
40984 return res;
40985@@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
40986 .spd_release = spd_release_page,
40987 };
40988
40989+ pax_track_stack();
40990+
40991 if (splice_grow_spd(pipe, &spd))
40992 return -ENOMEM;
40993
40994@@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
40995 goto err;
40996
40997 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40998- vec[i].iov_base = (void __user *) page_address(page);
40999+ vec[i].iov_base = (__force void __user *) page_address(page);
41000 vec[i].iov_len = this_len;
41001 spd.pages[i] = page;
41002 spd.nr_pages++;
41003@@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
41004 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
41005 {
41006 while (!pipe->nrbufs) {
41007- if (!pipe->writers)
41008+ if (!atomic_read(&pipe->writers))
41009 return 0;
41010
41011- if (!pipe->waiting_writers && sd->num_spliced)
41012+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
41013 return 0;
41014
41015 if (sd->flags & SPLICE_F_NONBLOCK)
41016@@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
41017 * out of the pipe right after the splice_to_pipe(). So set
41018 * PIPE_READERS appropriately.
41019 */
41020- pipe->readers = 1;
41021+ atomic_set(&pipe->readers, 1);
41022
41023 current->splice_pipe = pipe;
41024 }
41025@@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
41026 };
41027 long ret;
41028
41029+ pax_track_stack();
41030+
41031 pipe = get_pipe_info(file);
41032 if (!pipe)
41033 return -EBADF;
41034@@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
41035 ret = -ERESTARTSYS;
41036 break;
41037 }
41038- if (!pipe->writers)
41039+ if (!atomic_read(&pipe->writers))
41040 break;
41041- if (!pipe->waiting_writers) {
41042+ if (!atomic_read(&pipe->waiting_writers)) {
41043 if (flags & SPLICE_F_NONBLOCK) {
41044 ret = -EAGAIN;
41045 break;
41046@@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
41047 pipe_lock(pipe);
41048
41049 while (pipe->nrbufs >= pipe->buffers) {
41050- if (!pipe->readers) {
41051+ if (!atomic_read(&pipe->readers)) {
41052 send_sig(SIGPIPE, current, 0);
41053 ret = -EPIPE;
41054 break;
41055@@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
41056 ret = -ERESTARTSYS;
41057 break;
41058 }
41059- pipe->waiting_writers++;
41060+ atomic_inc(&pipe->waiting_writers);
41061 pipe_wait(pipe);
41062- pipe->waiting_writers--;
41063+ atomic_dec(&pipe->waiting_writers);
41064 }
41065
41066 pipe_unlock(pipe);
41067@@ -1815,14 +1821,14 @@ retry:
41068 pipe_double_lock(ipipe, opipe);
41069
41070 do {
41071- if (!opipe->readers) {
41072+ if (!atomic_read(&opipe->readers)) {
41073 send_sig(SIGPIPE, current, 0);
41074 if (!ret)
41075 ret = -EPIPE;
41076 break;
41077 }
41078
41079- if (!ipipe->nrbufs && !ipipe->writers)
41080+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41081 break;
41082
41083 /*
41084@@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
41085 pipe_double_lock(ipipe, opipe);
41086
41087 do {
41088- if (!opipe->readers) {
41089+ if (!atomic_read(&opipe->readers)) {
41090 send_sig(SIGPIPE, current, 0);
41091 if (!ret)
41092 ret = -EPIPE;
41093@@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
41094 * return EAGAIN if we have the potential of some data in the
41095 * future, otherwise just return 0
41096 */
41097- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41098+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41099 ret = -EAGAIN;
41100
41101 pipe_unlock(ipipe);
41102diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
41103--- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
41104+++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
41105@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41106
41107 struct sysfs_open_dirent {
41108 atomic_t refcnt;
41109- atomic_t event;
41110+ atomic_unchecked_t event;
41111 wait_queue_head_t poll;
41112 struct list_head buffers; /* goes through sysfs_buffer.list */
41113 };
41114@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
41115 if (!sysfs_get_active(attr_sd))
41116 return -ENODEV;
41117
41118- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41119+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41120 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41121
41122 sysfs_put_active(attr_sd);
41123@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
41124 return -ENOMEM;
41125
41126 atomic_set(&new_od->refcnt, 0);
41127- atomic_set(&new_od->event, 1);
41128+ atomic_set_unchecked(&new_od->event, 1);
41129 init_waitqueue_head(&new_od->poll);
41130 INIT_LIST_HEAD(&new_od->buffers);
41131 goto retry;
41132@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
41133
41134 sysfs_put_active(attr_sd);
41135
41136- if (buffer->event != atomic_read(&od->event))
41137+ if (buffer->event != atomic_read_unchecked(&od->event))
41138 goto trigger;
41139
41140 return DEFAULT_POLLMASK;
41141@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
41142
41143 od = sd->s_attr.open;
41144 if (od) {
41145- atomic_inc(&od->event);
41146+ atomic_inc_unchecked(&od->event);
41147 wake_up_interruptible(&od->poll);
41148 }
41149
41150diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41151--- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41152+++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41153@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41154 .s_name = "",
41155 .s_count = ATOMIC_INIT(1),
41156 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41157+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41158+ .s_mode = S_IFDIR | S_IRWXU,
41159+#else
41160 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41161+#endif
41162 .s_ino = 1,
41163 };
41164
41165diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41166--- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41167+++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41168@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41169
41170 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41171 {
41172- char *page = nd_get_link(nd);
41173+ const char *page = nd_get_link(nd);
41174 if (!IS_ERR(page))
41175 free_page((unsigned long)page);
41176 }
41177diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41178--- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41179+++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41180@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41181 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41182 int lastblock = 0;
41183
41184+ pax_track_stack();
41185+
41186 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41187 prev_epos.block = iinfo->i_location;
41188 prev_epos.bh = NULL;
41189diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41190--- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41191+++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41192@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41193
41194 u8 udf_tag_checksum(const struct tag *t)
41195 {
41196- u8 *data = (u8 *)t;
41197+ const u8 *data = (const u8 *)t;
41198 u8 checksum = 0;
41199 int i;
41200 for (i = 0; i < sizeof(struct tag); ++i)
41201diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41202--- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41203+++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41204@@ -1,6 +1,7 @@
41205 #include <linux/compiler.h>
41206 #include <linux/file.h>
41207 #include <linux/fs.h>
41208+#include <linux/security.h>
41209 #include <linux/linkage.h>
41210 #include <linux/mount.h>
41211 #include <linux/namei.h>
41212@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41213 goto mnt_drop_write_and_out;
41214 }
41215 }
41216+
41217+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41218+ error = -EACCES;
41219+ goto mnt_drop_write_and_out;
41220+ }
41221+
41222 mutex_lock(&inode->i_mutex);
41223 error = notify_change(path->dentry, &newattrs);
41224 mutex_unlock(&inode->i_mutex);
41225diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41226--- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41227+++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41228@@ -17,8 +17,8 @@
41229 struct posix_acl *
41230 posix_acl_from_xattr(const void *value, size_t size)
41231 {
41232- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41233- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41234+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41235+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41236 int count;
41237 struct posix_acl *acl;
41238 struct posix_acl_entry *acl_e;
41239diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41240--- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41241+++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41242@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41243 * Extended attribute SET operations
41244 */
41245 static long
41246-setxattr(struct dentry *d, const char __user *name, const void __user *value,
41247+setxattr(struct path *path, const char __user *name, const void __user *value,
41248 size_t size, int flags)
41249 {
41250 int error;
41251@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41252 return PTR_ERR(kvalue);
41253 }
41254
41255- error = vfs_setxattr(d, kname, kvalue, size, flags);
41256+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41257+ error = -EACCES;
41258+ goto out;
41259+ }
41260+
41261+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41262+out:
41263 kfree(kvalue);
41264 return error;
41265 }
41266@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41267 return error;
41268 error = mnt_want_write(path.mnt);
41269 if (!error) {
41270- error = setxattr(path.dentry, name, value, size, flags);
41271+ error = setxattr(&path, name, value, size, flags);
41272 mnt_drop_write(path.mnt);
41273 }
41274 path_put(&path);
41275@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41276 return error;
41277 error = mnt_want_write(path.mnt);
41278 if (!error) {
41279- error = setxattr(path.dentry, name, value, size, flags);
41280+ error = setxattr(&path, name, value, size, flags);
41281 mnt_drop_write(path.mnt);
41282 }
41283 path_put(&path);
41284@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41285 const void __user *,value, size_t, size, int, flags)
41286 {
41287 struct file *f;
41288- struct dentry *dentry;
41289 int error = -EBADF;
41290
41291 f = fget(fd);
41292 if (!f)
41293 return error;
41294- dentry = f->f_path.dentry;
41295- audit_inode(NULL, dentry);
41296+ audit_inode(NULL, f->f_path.dentry);
41297 error = mnt_want_write_file(f);
41298 if (!error) {
41299- error = setxattr(dentry, name, value, size, flags);
41300+ error = setxattr(&f->f_path, name, value, size, flags);
41301 mnt_drop_write(f->f_path.mnt);
41302 }
41303 fput(f);
41304diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41305--- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41306+++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41307@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41308 xfs_fsop_geom_t fsgeo;
41309 int error;
41310
41311+ memset(&fsgeo, 0, sizeof(fsgeo));
41312 error = xfs_fs_geometry(mp, &fsgeo, 3);
41313 if (error)
41314 return -error;
41315diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41316--- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41317+++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41318@@ -128,7 +128,7 @@ xfs_find_handle(
41319 }
41320
41321 error = -EFAULT;
41322- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41323+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41324 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41325 goto out_put;
41326
41327diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41328--- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41329+++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41330@@ -437,7 +437,7 @@ xfs_vn_put_link(
41331 struct nameidata *nd,
41332 void *p)
41333 {
41334- char *s = nd_get_link(nd);
41335+ const char *s = nd_get_link(nd);
41336
41337 if (!IS_ERR(s))
41338 kfree(s);
41339diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41340--- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41341+++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41342@@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41343 int nmap,
41344 int ret_nmap);
41345 #else
41346-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41347+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41348 #endif /* DEBUG */
41349
41350 STATIC int
41351diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41352--- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41353+++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41354@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41355 }
41356
41357 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41358- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41359+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41360+ char name[sfep->namelen];
41361+ memcpy(name, sfep->name, sfep->namelen);
41362+ if (filldir(dirent, name, sfep->namelen,
41363+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
41364+ *offset = off & 0x7fffffff;
41365+ return 0;
41366+ }
41367+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41368 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41369 *offset = off & 0x7fffffff;
41370 return 0;
41371diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41372--- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41373+++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41374@@ -0,0 +1,105 @@
41375+#include <linux/kernel.h>
41376+#include <linux/mm.h>
41377+#include <linux/slab.h>
41378+#include <linux/vmalloc.h>
41379+#include <linux/gracl.h>
41380+#include <linux/grsecurity.h>
41381+
41382+static unsigned long alloc_stack_next = 1;
41383+static unsigned long alloc_stack_size = 1;
41384+static void **alloc_stack;
41385+
41386+static __inline__ int
41387+alloc_pop(void)
41388+{
41389+ if (alloc_stack_next == 1)
41390+ return 0;
41391+
41392+ kfree(alloc_stack[alloc_stack_next - 2]);
41393+
41394+ alloc_stack_next--;
41395+
41396+ return 1;
41397+}
41398+
41399+static __inline__ int
41400+alloc_push(void *buf)
41401+{
41402+ if (alloc_stack_next >= alloc_stack_size)
41403+ return 1;
41404+
41405+ alloc_stack[alloc_stack_next - 1] = buf;
41406+
41407+ alloc_stack_next++;
41408+
41409+ return 0;
41410+}
41411+
41412+void *
41413+acl_alloc(unsigned long len)
41414+{
41415+ void *ret = NULL;
41416+
41417+ if (!len || len > PAGE_SIZE)
41418+ goto out;
41419+
41420+ ret = kmalloc(len, GFP_KERNEL);
41421+
41422+ if (ret) {
41423+ if (alloc_push(ret)) {
41424+ kfree(ret);
41425+ ret = NULL;
41426+ }
41427+ }
41428+
41429+out:
41430+ return ret;
41431+}
41432+
41433+void *
41434+acl_alloc_num(unsigned long num, unsigned long len)
41435+{
41436+ if (!len || (num > (PAGE_SIZE / len)))
41437+ return NULL;
41438+
41439+ return acl_alloc(num * len);
41440+}
41441+
41442+void
41443+acl_free_all(void)
41444+{
41445+ if (gr_acl_is_enabled() || !alloc_stack)
41446+ return;
41447+
41448+ while (alloc_pop()) ;
41449+
41450+ if (alloc_stack) {
41451+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41452+ kfree(alloc_stack);
41453+ else
41454+ vfree(alloc_stack);
41455+ }
41456+
41457+ alloc_stack = NULL;
41458+ alloc_stack_size = 1;
41459+ alloc_stack_next = 1;
41460+
41461+ return;
41462+}
41463+
41464+int
41465+acl_alloc_stack_init(unsigned long size)
41466+{
41467+ if ((size * sizeof (void *)) <= PAGE_SIZE)
41468+ alloc_stack =
41469+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41470+ else
41471+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
41472+
41473+ alloc_stack_size = size;
41474+
41475+ if (!alloc_stack)
41476+ return 0;
41477+ else
41478+ return 1;
41479+}
41480diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41481--- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41482+++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41483@@ -0,0 +1,4106 @@
41484+#include <linux/kernel.h>
41485+#include <linux/module.h>
41486+#include <linux/sched.h>
41487+#include <linux/mm.h>
41488+#include <linux/file.h>
41489+#include <linux/fs.h>
41490+#include <linux/namei.h>
41491+#include <linux/mount.h>
41492+#include <linux/tty.h>
41493+#include <linux/proc_fs.h>
41494+#include <linux/lglock.h>
41495+#include <linux/slab.h>
41496+#include <linux/vmalloc.h>
41497+#include <linux/types.h>
41498+#include <linux/sysctl.h>
41499+#include <linux/netdevice.h>
41500+#include <linux/ptrace.h>
41501+#include <linux/gracl.h>
41502+#include <linux/gralloc.h>
41503+#include <linux/grsecurity.h>
41504+#include <linux/grinternal.h>
41505+#include <linux/pid_namespace.h>
41506+#include <linux/fdtable.h>
41507+#include <linux/percpu.h>
41508+
41509+#include <asm/uaccess.h>
41510+#include <asm/errno.h>
41511+#include <asm/mman.h>
41512+
41513+static struct acl_role_db acl_role_set;
41514+static struct name_db name_set;
41515+static struct inodev_db inodev_set;
41516+
41517+/* for keeping track of userspace pointers used for subjects, so we
41518+ can share references in the kernel as well
41519+*/
41520+
41521+static struct path real_root;
41522+
41523+static struct acl_subj_map_db subj_map_set;
41524+
41525+static struct acl_role_label *default_role;
41526+
41527+static struct acl_role_label *role_list;
41528+
41529+static u16 acl_sp_role_value;
41530+
41531+extern char *gr_shared_page[4];
41532+static DEFINE_MUTEX(gr_dev_mutex);
41533+DEFINE_RWLOCK(gr_inode_lock);
41534+
41535+struct gr_arg *gr_usermode;
41536+
41537+static unsigned int gr_status __read_only = GR_STATUS_INIT;
41538+
41539+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41540+extern void gr_clear_learn_entries(void);
41541+
41542+#ifdef CONFIG_GRKERNSEC_RESLOG
41543+extern void gr_log_resource(const struct task_struct *task,
41544+ const int res, const unsigned long wanted, const int gt);
41545+#endif
41546+
41547+unsigned char *gr_system_salt;
41548+unsigned char *gr_system_sum;
41549+
41550+static struct sprole_pw **acl_special_roles = NULL;
41551+static __u16 num_sprole_pws = 0;
41552+
41553+static struct acl_role_label *kernel_role = NULL;
41554+
41555+static unsigned int gr_auth_attempts = 0;
41556+static unsigned long gr_auth_expires = 0UL;
41557+
41558+#ifdef CONFIG_NET
41559+extern struct vfsmount *sock_mnt;
41560+#endif
41561+
41562+extern struct vfsmount *pipe_mnt;
41563+extern struct vfsmount *shm_mnt;
41564+#ifdef CONFIG_HUGETLBFS
41565+extern struct vfsmount *hugetlbfs_vfsmount;
41566+#endif
41567+
41568+static struct acl_object_label *fakefs_obj_rw;
41569+static struct acl_object_label *fakefs_obj_rwx;
41570+
41571+extern int gr_init_uidset(void);
41572+extern void gr_free_uidset(void);
41573+extern void gr_remove_uid(uid_t uid);
41574+extern int gr_find_uid(uid_t uid);
41575+
41576+DECLARE_BRLOCK(vfsmount_lock);
41577+
41578+__inline__ int
41579+gr_acl_is_enabled(void)
41580+{
41581+ return (gr_status & GR_READY);
41582+}
41583+
41584+#ifdef CONFIG_BTRFS_FS
41585+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41586+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41587+#endif
41588+
41589+static inline dev_t __get_dev(const struct dentry *dentry)
41590+{
41591+#ifdef CONFIG_BTRFS_FS
41592+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41593+ return get_btrfs_dev_from_inode(dentry->d_inode);
41594+ else
41595+#endif
41596+ return dentry->d_inode->i_sb->s_dev;
41597+}
41598+
41599+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41600+{
41601+ return __get_dev(dentry);
41602+}
41603+
41604+static char gr_task_roletype_to_char(struct task_struct *task)
41605+{
41606+ switch (task->role->roletype &
41607+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41608+ GR_ROLE_SPECIAL)) {
41609+ case GR_ROLE_DEFAULT:
41610+ return 'D';
41611+ case GR_ROLE_USER:
41612+ return 'U';
41613+ case GR_ROLE_GROUP:
41614+ return 'G';
41615+ case GR_ROLE_SPECIAL:
41616+ return 'S';
41617+ }
41618+
41619+ return 'X';
41620+}
41621+
41622+char gr_roletype_to_char(void)
41623+{
41624+ return gr_task_roletype_to_char(current);
41625+}
41626+
41627+__inline__ int
41628+gr_acl_tpe_check(void)
41629+{
41630+ if (unlikely(!(gr_status & GR_READY)))
41631+ return 0;
41632+ if (current->role->roletype & GR_ROLE_TPE)
41633+ return 1;
41634+ else
41635+ return 0;
41636+}
41637+
41638+int
41639+gr_handle_rawio(const struct inode *inode)
41640+{
41641+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41642+ if (inode && S_ISBLK(inode->i_mode) &&
41643+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41644+ !capable(CAP_SYS_RAWIO))
41645+ return 1;
41646+#endif
41647+ return 0;
41648+}
41649+
41650+static int
41651+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41652+{
41653+ if (likely(lena != lenb))
41654+ return 0;
41655+
41656+ return !memcmp(a, b, lena);
41657+}
41658+
41659+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41660+{
41661+ *buflen -= namelen;
41662+ if (*buflen < 0)
41663+ return -ENAMETOOLONG;
41664+ *buffer -= namelen;
41665+ memcpy(*buffer, str, namelen);
41666+ return 0;
41667+}
41668+
41669+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41670+{
41671+ return prepend(buffer, buflen, name->name, name->len);
41672+}
41673+
41674+static int prepend_path(const struct path *path, struct path *root,
41675+ char **buffer, int *buflen)
41676+{
41677+ struct dentry *dentry = path->dentry;
41678+ struct vfsmount *vfsmnt = path->mnt;
41679+ bool slash = false;
41680+ int error = 0;
41681+
41682+ while (dentry != root->dentry || vfsmnt != root->mnt) {
41683+ struct dentry * parent;
41684+
41685+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41686+ /* Global root? */
41687+ if (vfsmnt->mnt_parent == vfsmnt) {
41688+ goto out;
41689+ }
41690+ dentry = vfsmnt->mnt_mountpoint;
41691+ vfsmnt = vfsmnt->mnt_parent;
41692+ continue;
41693+ }
41694+ parent = dentry->d_parent;
41695+ prefetch(parent);
41696+ spin_lock(&dentry->d_lock);
41697+ error = prepend_name(buffer, buflen, &dentry->d_name);
41698+ spin_unlock(&dentry->d_lock);
41699+ if (!error)
41700+ error = prepend(buffer, buflen, "/", 1);
41701+ if (error)
41702+ break;
41703+
41704+ slash = true;
41705+ dentry = parent;
41706+ }
41707+
41708+out:
41709+ if (!error && !slash)
41710+ error = prepend(buffer, buflen, "/", 1);
41711+
41712+ return error;
41713+}
41714+
41715+/* this must be called with vfsmount_lock and rename_lock held */
41716+
41717+static char *__our_d_path(const struct path *path, struct path *root,
41718+ char *buf, int buflen)
41719+{
41720+ char *res = buf + buflen;
41721+ int error;
41722+
41723+ prepend(&res, &buflen, "\0", 1);
41724+ error = prepend_path(path, root, &res, &buflen);
41725+ if (error)
41726+ return ERR_PTR(error);
41727+
41728+ return res;
41729+}
41730+
41731+static char *
41732+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41733+{
41734+ char *retval;
41735+
41736+ retval = __our_d_path(path, root, buf, buflen);
41737+ if (unlikely(IS_ERR(retval)))
41738+ retval = strcpy(buf, "<path too long>");
41739+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41740+ retval[1] = '\0';
41741+
41742+ return retval;
41743+}
41744+
41745+static char *
41746+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41747+ char *buf, int buflen)
41748+{
41749+ struct path path;
41750+ char *res;
41751+
41752+ path.dentry = (struct dentry *)dentry;
41753+ path.mnt = (struct vfsmount *)vfsmnt;
41754+
41755+ /* we can use real_root.dentry, real_root.mnt, because this is only called
41756+ by the RBAC system */
41757+ res = gen_full_path(&path, &real_root, buf, buflen);
41758+
41759+ return res;
41760+}
41761+
41762+static char *
41763+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41764+ char *buf, int buflen)
41765+{
41766+ char *res;
41767+ struct path path;
41768+ struct path root;
41769+ struct task_struct *reaper = &init_task;
41770+
41771+ path.dentry = (struct dentry *)dentry;
41772+ path.mnt = (struct vfsmount *)vfsmnt;
41773+
41774+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41775+ get_fs_root(reaper->fs, &root);
41776+
41777+ write_seqlock(&rename_lock);
41778+ br_read_lock(vfsmount_lock);
41779+ res = gen_full_path(&path, &root, buf, buflen);
41780+ br_read_unlock(vfsmount_lock);
41781+ write_sequnlock(&rename_lock);
41782+
41783+ path_put(&root);
41784+ return res;
41785+}
41786+
41787+static char *
41788+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41789+{
41790+ char *ret;
41791+ write_seqlock(&rename_lock);
41792+ br_read_lock(vfsmount_lock);
41793+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41794+ PAGE_SIZE);
41795+ br_read_unlock(vfsmount_lock);
41796+ write_sequnlock(&rename_lock);
41797+ return ret;
41798+}
41799+
41800+char *
41801+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41802+{
41803+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41804+ PAGE_SIZE);
41805+}
41806+
41807+char *
41808+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41809+{
41810+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41811+ PAGE_SIZE);
41812+}
41813+
41814+char *
41815+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41816+{
41817+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41818+ PAGE_SIZE);
41819+}
41820+
41821+char *
41822+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41823+{
41824+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41825+ PAGE_SIZE);
41826+}
41827+
41828+char *
41829+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41830+{
41831+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41832+ PAGE_SIZE);
41833+}
41834+
41835+__inline__ __u32
41836+to_gr_audit(const __u32 reqmode)
41837+{
41838+ /* masks off auditable permission flags, then shifts them to create
41839+ auditing flags, and adds the special case of append auditing if
41840+ we're requesting write */
41841+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41842+}
41843+
41844+struct acl_subject_label *
41845+lookup_subject_map(const struct acl_subject_label *userp)
41846+{
41847+ unsigned int index = shash(userp, subj_map_set.s_size);
41848+ struct subject_map *match;
41849+
41850+ match = subj_map_set.s_hash[index];
41851+
41852+ while (match && match->user != userp)
41853+ match = match->next;
41854+
41855+ if (match != NULL)
41856+ return match->kernel;
41857+ else
41858+ return NULL;
41859+}
41860+
41861+static void
41862+insert_subj_map_entry(struct subject_map *subjmap)
41863+{
41864+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41865+ struct subject_map **curr;
41866+
41867+ subjmap->prev = NULL;
41868+
41869+ curr = &subj_map_set.s_hash[index];
41870+ if (*curr != NULL)
41871+ (*curr)->prev = subjmap;
41872+
41873+ subjmap->next = *curr;
41874+ *curr = subjmap;
41875+
41876+ return;
41877+}
41878+
41879+static struct acl_role_label *
41880+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41881+ const gid_t gid)
41882+{
41883+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41884+ struct acl_role_label *match;
41885+ struct role_allowed_ip *ipp;
41886+ unsigned int x;
41887+ u32 curr_ip = task->signal->curr_ip;
41888+
41889+ task->signal->saved_ip = curr_ip;
41890+
41891+ match = acl_role_set.r_hash[index];
41892+
41893+ while (match) {
41894+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41895+ for (x = 0; x < match->domain_child_num; x++) {
41896+ if (match->domain_children[x] == uid)
41897+ goto found;
41898+ }
41899+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41900+ break;
41901+ match = match->next;
41902+ }
41903+found:
41904+ if (match == NULL) {
41905+ try_group:
41906+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41907+ match = acl_role_set.r_hash[index];
41908+
41909+ while (match) {
41910+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41911+ for (x = 0; x < match->domain_child_num; x++) {
41912+ if (match->domain_children[x] == gid)
41913+ goto found2;
41914+ }
41915+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41916+ break;
41917+ match = match->next;
41918+ }
41919+found2:
41920+ if (match == NULL)
41921+ match = default_role;
41922+ if (match->allowed_ips == NULL)
41923+ return match;
41924+ else {
41925+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41926+ if (likely
41927+ ((ntohl(curr_ip) & ipp->netmask) ==
41928+ (ntohl(ipp->addr) & ipp->netmask)))
41929+ return match;
41930+ }
41931+ match = default_role;
41932+ }
41933+ } else if (match->allowed_ips == NULL) {
41934+ return match;
41935+ } else {
41936+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41937+ if (likely
41938+ ((ntohl(curr_ip) & ipp->netmask) ==
41939+ (ntohl(ipp->addr) & ipp->netmask)))
41940+ return match;
41941+ }
41942+ goto try_group;
41943+ }
41944+
41945+ return match;
41946+}
41947+
41948+struct acl_subject_label *
41949+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41950+ const struct acl_role_label *role)
41951+{
41952+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41953+ struct acl_subject_label *match;
41954+
41955+ match = role->subj_hash[index];
41956+
41957+ while (match && (match->inode != ino || match->device != dev ||
41958+ (match->mode & GR_DELETED))) {
41959+ match = match->next;
41960+ }
41961+
41962+ if (match && !(match->mode & GR_DELETED))
41963+ return match;
41964+ else
41965+ return NULL;
41966+}
41967+
41968+struct acl_subject_label *
41969+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41970+ const struct acl_role_label *role)
41971+{
41972+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41973+ struct acl_subject_label *match;
41974+
41975+ match = role->subj_hash[index];
41976+
41977+ while (match && (match->inode != ino || match->device != dev ||
41978+ !(match->mode & GR_DELETED))) {
41979+ match = match->next;
41980+ }
41981+
41982+ if (match && (match->mode & GR_DELETED))
41983+ return match;
41984+ else
41985+ return NULL;
41986+}
41987+
41988+static struct acl_object_label *
41989+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41990+ const struct acl_subject_label *subj)
41991+{
41992+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41993+ struct acl_object_label *match;
41994+
41995+ match = subj->obj_hash[index];
41996+
41997+ while (match && (match->inode != ino || match->device != dev ||
41998+ (match->mode & GR_DELETED))) {
41999+ match = match->next;
42000+ }
42001+
42002+ if (match && !(match->mode & GR_DELETED))
42003+ return match;
42004+ else
42005+ return NULL;
42006+}
42007+
42008+static struct acl_object_label *
42009+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
42010+ const struct acl_subject_label *subj)
42011+{
42012+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42013+ struct acl_object_label *match;
42014+
42015+ match = subj->obj_hash[index];
42016+
42017+ while (match && (match->inode != ino || match->device != dev ||
42018+ !(match->mode & GR_DELETED))) {
42019+ match = match->next;
42020+ }
42021+
42022+ if (match && (match->mode & GR_DELETED))
42023+ return match;
42024+
42025+ match = subj->obj_hash[index];
42026+
42027+ while (match && (match->inode != ino || match->device != dev ||
42028+ (match->mode & GR_DELETED))) {
42029+ match = match->next;
42030+ }
42031+
42032+ if (match && !(match->mode & GR_DELETED))
42033+ return match;
42034+ else
42035+ return NULL;
42036+}
42037+
42038+static struct name_entry *
42039+lookup_name_entry(const char *name)
42040+{
42041+ unsigned int len = strlen(name);
42042+ unsigned int key = full_name_hash(name, len);
42043+ unsigned int index = key % name_set.n_size;
42044+ struct name_entry *match;
42045+
42046+ match = name_set.n_hash[index];
42047+
42048+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
42049+ match = match->next;
42050+
42051+ return match;
42052+}
42053+
42054+static struct name_entry *
42055+lookup_name_entry_create(const char *name)
42056+{
42057+ unsigned int len = strlen(name);
42058+ unsigned int key = full_name_hash(name, len);
42059+ unsigned int index = key % name_set.n_size;
42060+ struct name_entry *match;
42061+
42062+ match = name_set.n_hash[index];
42063+
42064+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42065+ !match->deleted))
42066+ match = match->next;
42067+
42068+ if (match && match->deleted)
42069+ return match;
42070+
42071+ match = name_set.n_hash[index];
42072+
42073+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42074+ match->deleted))
42075+ match = match->next;
42076+
42077+ if (match && !match->deleted)
42078+ return match;
42079+ else
42080+ return NULL;
42081+}
42082+
42083+static struct inodev_entry *
42084+lookup_inodev_entry(const ino_t ino, const dev_t dev)
42085+{
42086+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
42087+ struct inodev_entry *match;
42088+
42089+ match = inodev_set.i_hash[index];
42090+
42091+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42092+ match = match->next;
42093+
42094+ return match;
42095+}
42096+
42097+static void
42098+insert_inodev_entry(struct inodev_entry *entry)
42099+{
42100+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42101+ inodev_set.i_size);
42102+ struct inodev_entry **curr;
42103+
42104+ entry->prev = NULL;
42105+
42106+ curr = &inodev_set.i_hash[index];
42107+ if (*curr != NULL)
42108+ (*curr)->prev = entry;
42109+
42110+ entry->next = *curr;
42111+ *curr = entry;
42112+
42113+ return;
42114+}
42115+
42116+static void
42117+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42118+{
42119+ unsigned int index =
42120+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42121+ struct acl_role_label **curr;
42122+ struct acl_role_label *tmp;
42123+
42124+ curr = &acl_role_set.r_hash[index];
42125+
42126+ /* if role was already inserted due to domains and already has
42127+ a role in the same bucket as it attached, then we need to
42128+ combine these two buckets
42129+ */
42130+ if (role->next) {
42131+ tmp = role->next;
42132+ while (tmp->next)
42133+ tmp = tmp->next;
42134+ tmp->next = *curr;
42135+ } else
42136+ role->next = *curr;
42137+ *curr = role;
42138+
42139+ return;
42140+}
42141+
42142+static void
42143+insert_acl_role_label(struct acl_role_label *role)
42144+{
42145+ int i;
42146+
42147+ if (role_list == NULL) {
42148+ role_list = role;
42149+ role->prev = NULL;
42150+ } else {
42151+ role->prev = role_list;
42152+ role_list = role;
42153+ }
42154+
42155+ /* used for hash chains */
42156+ role->next = NULL;
42157+
42158+ if (role->roletype & GR_ROLE_DOMAIN) {
42159+ for (i = 0; i < role->domain_child_num; i++)
42160+ __insert_acl_role_label(role, role->domain_children[i]);
42161+ } else
42162+ __insert_acl_role_label(role, role->uidgid);
42163+}
42164+
42165+static int
42166+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42167+{
42168+ struct name_entry **curr, *nentry;
42169+ struct inodev_entry *ientry;
42170+ unsigned int len = strlen(name);
42171+ unsigned int key = full_name_hash(name, len);
42172+ unsigned int index = key % name_set.n_size;
42173+
42174+ curr = &name_set.n_hash[index];
42175+
42176+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42177+ curr = &((*curr)->next);
42178+
42179+ if (*curr != NULL)
42180+ return 1;
42181+
42182+ nentry = acl_alloc(sizeof (struct name_entry));
42183+ if (nentry == NULL)
42184+ return 0;
42185+ ientry = acl_alloc(sizeof (struct inodev_entry));
42186+ if (ientry == NULL)
42187+ return 0;
42188+ ientry->nentry = nentry;
42189+
42190+ nentry->key = key;
42191+ nentry->name = name;
42192+ nentry->inode = inode;
42193+ nentry->device = device;
42194+ nentry->len = len;
42195+ nentry->deleted = deleted;
42196+
42197+ nentry->prev = NULL;
42198+ curr = &name_set.n_hash[index];
42199+ if (*curr != NULL)
42200+ (*curr)->prev = nentry;
42201+ nentry->next = *curr;
42202+ *curr = nentry;
42203+
42204+ /* insert us into the table searchable by inode/dev */
42205+ insert_inodev_entry(ientry);
42206+
42207+ return 1;
42208+}
42209+
42210+static void
42211+insert_acl_obj_label(struct acl_object_label *obj,
42212+ struct acl_subject_label *subj)
42213+{
42214+ unsigned int index =
42215+ fhash(obj->inode, obj->device, subj->obj_hash_size);
42216+ struct acl_object_label **curr;
42217+
42218+
42219+ obj->prev = NULL;
42220+
42221+ curr = &subj->obj_hash[index];
42222+ if (*curr != NULL)
42223+ (*curr)->prev = obj;
42224+
42225+ obj->next = *curr;
42226+ *curr = obj;
42227+
42228+ return;
42229+}
42230+
42231+static void
42232+insert_acl_subj_label(struct acl_subject_label *obj,
42233+ struct acl_role_label *role)
42234+{
42235+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42236+ struct acl_subject_label **curr;
42237+
42238+ obj->prev = NULL;
42239+
42240+ curr = &role->subj_hash[index];
42241+ if (*curr != NULL)
42242+ (*curr)->prev = obj;
42243+
42244+ obj->next = *curr;
42245+ *curr = obj;
42246+
42247+ return;
42248+}
42249+
42250+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42251+
42252+static void *
42253+create_table(__u32 * len, int elementsize)
42254+{
42255+ unsigned int table_sizes[] = {
42256+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42257+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42258+ 4194301, 8388593, 16777213, 33554393, 67108859
42259+ };
42260+ void *newtable = NULL;
42261+ unsigned int pwr = 0;
42262+
42263+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42264+ table_sizes[pwr] <= *len)
42265+ pwr++;
42266+
42267+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42268+ return newtable;
42269+
42270+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42271+ newtable =
42272+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42273+ else
42274+ newtable = vmalloc(table_sizes[pwr] * elementsize);
42275+
42276+ *len = table_sizes[pwr];
42277+
42278+ return newtable;
42279+}
42280+
42281+static int
42282+init_variables(const struct gr_arg *arg)
42283+{
42284+ struct task_struct *reaper = &init_task;
42285+ unsigned int stacksize;
42286+
42287+ subj_map_set.s_size = arg->role_db.num_subjects;
42288+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42289+ name_set.n_size = arg->role_db.num_objects;
42290+ inodev_set.i_size = arg->role_db.num_objects;
42291+
42292+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
42293+ !name_set.n_size || !inodev_set.i_size)
42294+ return 1;
42295+
42296+ if (!gr_init_uidset())
42297+ return 1;
42298+
42299+ /* set up the stack that holds allocation info */
42300+
42301+ stacksize = arg->role_db.num_pointers + 5;
42302+
42303+ if (!acl_alloc_stack_init(stacksize))
42304+ return 1;
42305+
42306+ /* grab reference for the real root dentry and vfsmount */
42307+ get_fs_root(reaper->fs, &real_root);
42308+
42309+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42310+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42311+#endif
42312+
42313+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42314+ if (fakefs_obj_rw == NULL)
42315+ return 1;
42316+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42317+
42318+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42319+ if (fakefs_obj_rwx == NULL)
42320+ return 1;
42321+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42322+
42323+ subj_map_set.s_hash =
42324+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42325+ acl_role_set.r_hash =
42326+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42327+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42328+ inodev_set.i_hash =
42329+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42330+
42331+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42332+ !name_set.n_hash || !inodev_set.i_hash)
42333+ return 1;
42334+
42335+ memset(subj_map_set.s_hash, 0,
42336+ sizeof(struct subject_map *) * subj_map_set.s_size);
42337+ memset(acl_role_set.r_hash, 0,
42338+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
42339+ memset(name_set.n_hash, 0,
42340+ sizeof (struct name_entry *) * name_set.n_size);
42341+ memset(inodev_set.i_hash, 0,
42342+ sizeof (struct inodev_entry *) * inodev_set.i_size);
42343+
42344+ return 0;
42345+}
42346+
42347+/* free information not needed after startup
42348+ currently contains user->kernel pointer mappings for subjects
42349+*/
42350+
42351+static void
42352+free_init_variables(void)
42353+{
42354+ __u32 i;
42355+
42356+ if (subj_map_set.s_hash) {
42357+ for (i = 0; i < subj_map_set.s_size; i++) {
42358+ if (subj_map_set.s_hash[i]) {
42359+ kfree(subj_map_set.s_hash[i]);
42360+ subj_map_set.s_hash[i] = NULL;
42361+ }
42362+ }
42363+
42364+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42365+ PAGE_SIZE)
42366+ kfree(subj_map_set.s_hash);
42367+ else
42368+ vfree(subj_map_set.s_hash);
42369+ }
42370+
42371+ return;
42372+}
42373+
42374+static void
42375+free_variables(void)
42376+{
42377+ struct acl_subject_label *s;
42378+ struct acl_role_label *r;
42379+ struct task_struct *task, *task2;
42380+ unsigned int x;
42381+
42382+ gr_clear_learn_entries();
42383+
42384+ read_lock(&tasklist_lock);
42385+ do_each_thread(task2, task) {
42386+ task->acl_sp_role = 0;
42387+ task->acl_role_id = 0;
42388+ task->acl = NULL;
42389+ task->role = NULL;
42390+ } while_each_thread(task2, task);
42391+ read_unlock(&tasklist_lock);
42392+
42393+ /* release the reference to the real root dentry and vfsmount */
42394+ path_put(&real_root);
42395+
42396+ /* free all object hash tables */
42397+
42398+ FOR_EACH_ROLE_START(r)
42399+ if (r->subj_hash == NULL)
42400+ goto next_role;
42401+ FOR_EACH_SUBJECT_START(r, s, x)
42402+ if (s->obj_hash == NULL)
42403+ break;
42404+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42405+ kfree(s->obj_hash);
42406+ else
42407+ vfree(s->obj_hash);
42408+ FOR_EACH_SUBJECT_END(s, x)
42409+ FOR_EACH_NESTED_SUBJECT_START(r, s)
42410+ if (s->obj_hash == NULL)
42411+ break;
42412+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42413+ kfree(s->obj_hash);
42414+ else
42415+ vfree(s->obj_hash);
42416+ FOR_EACH_NESTED_SUBJECT_END(s)
42417+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42418+ kfree(r->subj_hash);
42419+ else
42420+ vfree(r->subj_hash);
42421+ r->subj_hash = NULL;
42422+next_role:
42423+ FOR_EACH_ROLE_END(r)
42424+
42425+ acl_free_all();
42426+
42427+ if (acl_role_set.r_hash) {
42428+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42429+ PAGE_SIZE)
42430+ kfree(acl_role_set.r_hash);
42431+ else
42432+ vfree(acl_role_set.r_hash);
42433+ }
42434+ if (name_set.n_hash) {
42435+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
42436+ PAGE_SIZE)
42437+ kfree(name_set.n_hash);
42438+ else
42439+ vfree(name_set.n_hash);
42440+ }
42441+
42442+ if (inodev_set.i_hash) {
42443+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42444+ PAGE_SIZE)
42445+ kfree(inodev_set.i_hash);
42446+ else
42447+ vfree(inodev_set.i_hash);
42448+ }
42449+
42450+ gr_free_uidset();
42451+
42452+ memset(&name_set, 0, sizeof (struct name_db));
42453+ memset(&inodev_set, 0, sizeof (struct inodev_db));
42454+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42455+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42456+
42457+ default_role = NULL;
42458+ role_list = NULL;
42459+
42460+ return;
42461+}
42462+
42463+static __u32
42464+count_user_objs(struct acl_object_label *userp)
42465+{
42466+ struct acl_object_label o_tmp;
42467+ __u32 num = 0;
42468+
42469+ while (userp) {
42470+ if (copy_from_user(&o_tmp, userp,
42471+ sizeof (struct acl_object_label)))
42472+ break;
42473+
42474+ userp = o_tmp.prev;
42475+ num++;
42476+ }
42477+
42478+ return num;
42479+}
42480+
42481+static struct acl_subject_label *
42482+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42483+
42484+static int
42485+copy_user_glob(struct acl_object_label *obj)
42486+{
42487+ struct acl_object_label *g_tmp, **guser;
42488+ unsigned int len;
42489+ char *tmp;
42490+
42491+ if (obj->globbed == NULL)
42492+ return 0;
42493+
42494+ guser = &obj->globbed;
42495+ while (*guser) {
42496+ g_tmp = (struct acl_object_label *)
42497+ acl_alloc(sizeof (struct acl_object_label));
42498+ if (g_tmp == NULL)
42499+ return -ENOMEM;
42500+
42501+ if (copy_from_user(g_tmp, *guser,
42502+ sizeof (struct acl_object_label)))
42503+ return -EFAULT;
42504+
42505+ len = strnlen_user(g_tmp->filename, PATH_MAX);
42506+
42507+ if (!len || len >= PATH_MAX)
42508+ return -EINVAL;
42509+
42510+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42511+ return -ENOMEM;
42512+
42513+ if (copy_from_user(tmp, g_tmp->filename, len))
42514+ return -EFAULT;
42515+ tmp[len-1] = '\0';
42516+ g_tmp->filename = tmp;
42517+
42518+ *guser = g_tmp;
42519+ guser = &(g_tmp->next);
42520+ }
42521+
42522+ return 0;
42523+}
42524+
42525+static int
42526+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42527+ struct acl_role_label *role)
42528+{
42529+ struct acl_object_label *o_tmp;
42530+ unsigned int len;
42531+ int ret;
42532+ char *tmp;
42533+
42534+ while (userp) {
42535+ if ((o_tmp = (struct acl_object_label *)
42536+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
42537+ return -ENOMEM;
42538+
42539+ if (copy_from_user(o_tmp, userp,
42540+ sizeof (struct acl_object_label)))
42541+ return -EFAULT;
42542+
42543+ userp = o_tmp->prev;
42544+
42545+ len = strnlen_user(o_tmp->filename, PATH_MAX);
42546+
42547+ if (!len || len >= PATH_MAX)
42548+ return -EINVAL;
42549+
42550+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42551+ return -ENOMEM;
42552+
42553+ if (copy_from_user(tmp, o_tmp->filename, len))
42554+ return -EFAULT;
42555+ tmp[len-1] = '\0';
42556+ o_tmp->filename = tmp;
42557+
42558+ insert_acl_obj_label(o_tmp, subj);
42559+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42560+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42561+ return -ENOMEM;
42562+
42563+ ret = copy_user_glob(o_tmp);
42564+ if (ret)
42565+ return ret;
42566+
42567+ if (o_tmp->nested) {
42568+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42569+ if (IS_ERR(o_tmp->nested))
42570+ return PTR_ERR(o_tmp->nested);
42571+
42572+ /* insert into nested subject list */
42573+ o_tmp->nested->next = role->hash->first;
42574+ role->hash->first = o_tmp->nested;
42575+ }
42576+ }
42577+
42578+ return 0;
42579+}
42580+
42581+static __u32
42582+count_user_subjs(struct acl_subject_label *userp)
42583+{
42584+ struct acl_subject_label s_tmp;
42585+ __u32 num = 0;
42586+
42587+ while (userp) {
42588+ if (copy_from_user(&s_tmp, userp,
42589+ sizeof (struct acl_subject_label)))
42590+ break;
42591+
42592+ userp = s_tmp.prev;
42593+ /* do not count nested subjects against this count, since
42594+ they are not included in the hash table, but are
42595+ attached to objects. We have already counted
42596+ the subjects in userspace for the allocation
42597+ stack
42598+ */
42599+ if (!(s_tmp.mode & GR_NESTED))
42600+ num++;
42601+ }
42602+
42603+ return num;
42604+}
42605+
42606+static int
42607+copy_user_allowedips(struct acl_role_label *rolep)
42608+{
42609+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42610+
42611+ ruserip = rolep->allowed_ips;
42612+
42613+ while (ruserip) {
42614+ rlast = rtmp;
42615+
42616+ if ((rtmp = (struct role_allowed_ip *)
42617+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42618+ return -ENOMEM;
42619+
42620+ if (copy_from_user(rtmp, ruserip,
42621+ sizeof (struct role_allowed_ip)))
42622+ return -EFAULT;
42623+
42624+ ruserip = rtmp->prev;
42625+
42626+ if (!rlast) {
42627+ rtmp->prev = NULL;
42628+ rolep->allowed_ips = rtmp;
42629+ } else {
42630+ rlast->next = rtmp;
42631+ rtmp->prev = rlast;
42632+ }
42633+
42634+ if (!ruserip)
42635+ rtmp->next = NULL;
42636+ }
42637+
42638+ return 0;
42639+}
42640+
42641+static int
42642+copy_user_transitions(struct acl_role_label *rolep)
42643+{
42644+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
42645+
42646+ unsigned int len;
42647+ char *tmp;
42648+
42649+ rusertp = rolep->transitions;
42650+
42651+ while (rusertp) {
42652+ rlast = rtmp;
42653+
42654+ if ((rtmp = (struct role_transition *)
42655+ acl_alloc(sizeof (struct role_transition))) == NULL)
42656+ return -ENOMEM;
42657+
42658+ if (copy_from_user(rtmp, rusertp,
42659+ sizeof (struct role_transition)))
42660+ return -EFAULT;
42661+
42662+ rusertp = rtmp->prev;
42663+
42664+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42665+
42666+ if (!len || len >= GR_SPROLE_LEN)
42667+ return -EINVAL;
42668+
42669+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42670+ return -ENOMEM;
42671+
42672+ if (copy_from_user(tmp, rtmp->rolename, len))
42673+ return -EFAULT;
42674+ tmp[len-1] = '\0';
42675+ rtmp->rolename = tmp;
42676+
42677+ if (!rlast) {
42678+ rtmp->prev = NULL;
42679+ rolep->transitions = rtmp;
42680+ } else {
42681+ rlast->next = rtmp;
42682+ rtmp->prev = rlast;
42683+ }
42684+
42685+ if (!rusertp)
42686+ rtmp->next = NULL;
42687+ }
42688+
42689+ return 0;
42690+}
42691+
42692+static struct acl_subject_label *
42693+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42694+{
42695+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42696+ unsigned int len;
42697+ char *tmp;
42698+ __u32 num_objs;
42699+ struct acl_ip_label **i_tmp, *i_utmp2;
42700+ struct gr_hash_struct ghash;
42701+ struct subject_map *subjmap;
42702+ unsigned int i_num;
42703+ int err;
42704+
42705+ s_tmp = lookup_subject_map(userp);
42706+
42707+ /* we've already copied this subject into the kernel, just return
42708+ the reference to it, and don't copy it over again
42709+ */
42710+ if (s_tmp)
42711+ return(s_tmp);
42712+
42713+ if ((s_tmp = (struct acl_subject_label *)
42714+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42715+ return ERR_PTR(-ENOMEM);
42716+
42717+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42718+ if (subjmap == NULL)
42719+ return ERR_PTR(-ENOMEM);
42720+
42721+ subjmap->user = userp;
42722+ subjmap->kernel = s_tmp;
42723+ insert_subj_map_entry(subjmap);
42724+
42725+ if (copy_from_user(s_tmp, userp,
42726+ sizeof (struct acl_subject_label)))
42727+ return ERR_PTR(-EFAULT);
42728+
42729+ len = strnlen_user(s_tmp->filename, PATH_MAX);
42730+
42731+ if (!len || len >= PATH_MAX)
42732+ return ERR_PTR(-EINVAL);
42733+
42734+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42735+ return ERR_PTR(-ENOMEM);
42736+
42737+ if (copy_from_user(tmp, s_tmp->filename, len))
42738+ return ERR_PTR(-EFAULT);
42739+ tmp[len-1] = '\0';
42740+ s_tmp->filename = tmp;
42741+
42742+ if (!strcmp(s_tmp->filename, "/"))
42743+ role->root_label = s_tmp;
42744+
42745+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42746+ return ERR_PTR(-EFAULT);
42747+
42748+ /* copy user and group transition tables */
42749+
42750+ if (s_tmp->user_trans_num) {
42751+ uid_t *uidlist;
42752+
42753+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42754+ if (uidlist == NULL)
42755+ return ERR_PTR(-ENOMEM);
42756+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42757+ return ERR_PTR(-EFAULT);
42758+
42759+ s_tmp->user_transitions = uidlist;
42760+ }
42761+
42762+ if (s_tmp->group_trans_num) {
42763+ gid_t *gidlist;
42764+
42765+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42766+ if (gidlist == NULL)
42767+ return ERR_PTR(-ENOMEM);
42768+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42769+ return ERR_PTR(-EFAULT);
42770+
42771+ s_tmp->group_transitions = gidlist;
42772+ }
42773+
42774+ /* set up object hash table */
42775+ num_objs = count_user_objs(ghash.first);
42776+
42777+ s_tmp->obj_hash_size = num_objs;
42778+ s_tmp->obj_hash =
42779+ (struct acl_object_label **)
42780+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42781+
42782+ if (!s_tmp->obj_hash)
42783+ return ERR_PTR(-ENOMEM);
42784+
42785+ memset(s_tmp->obj_hash, 0,
42786+ s_tmp->obj_hash_size *
42787+ sizeof (struct acl_object_label *));
42788+
42789+ /* add in objects */
42790+ err = copy_user_objs(ghash.first, s_tmp, role);
42791+
42792+ if (err)
42793+ return ERR_PTR(err);
42794+
42795+ /* set pointer for parent subject */
42796+ if (s_tmp->parent_subject) {
42797+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42798+
42799+ if (IS_ERR(s_tmp2))
42800+ return s_tmp2;
42801+
42802+ s_tmp->parent_subject = s_tmp2;
42803+ }
42804+
42805+ /* add in ip acls */
42806+
42807+ if (!s_tmp->ip_num) {
42808+ s_tmp->ips = NULL;
42809+ goto insert;
42810+ }
42811+
42812+ i_tmp =
42813+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42814+ sizeof (struct acl_ip_label *));
42815+
42816+ if (!i_tmp)
42817+ return ERR_PTR(-ENOMEM);
42818+
42819+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42820+ *(i_tmp + i_num) =
42821+ (struct acl_ip_label *)
42822+ acl_alloc(sizeof (struct acl_ip_label));
42823+ if (!*(i_tmp + i_num))
42824+ return ERR_PTR(-ENOMEM);
42825+
42826+ if (copy_from_user
42827+ (&i_utmp2, s_tmp->ips + i_num,
42828+ sizeof (struct acl_ip_label *)))
42829+ return ERR_PTR(-EFAULT);
42830+
42831+ if (copy_from_user
42832+ (*(i_tmp + i_num), i_utmp2,
42833+ sizeof (struct acl_ip_label)))
42834+ return ERR_PTR(-EFAULT);
42835+
42836+ if ((*(i_tmp + i_num))->iface == NULL)
42837+ continue;
42838+
42839+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42840+ if (!len || len >= IFNAMSIZ)
42841+ return ERR_PTR(-EINVAL);
42842+ tmp = acl_alloc(len);
42843+ if (tmp == NULL)
42844+ return ERR_PTR(-ENOMEM);
42845+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42846+ return ERR_PTR(-EFAULT);
42847+ (*(i_tmp + i_num))->iface = tmp;
42848+ }
42849+
42850+ s_tmp->ips = i_tmp;
42851+
42852+insert:
42853+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42854+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42855+ return ERR_PTR(-ENOMEM);
42856+
42857+ return s_tmp;
42858+}
42859+
42860+static int
42861+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42862+{
42863+ struct acl_subject_label s_pre;
42864+ struct acl_subject_label * ret;
42865+ int err;
42866+
42867+ while (userp) {
42868+ if (copy_from_user(&s_pre, userp,
42869+ sizeof (struct acl_subject_label)))
42870+ return -EFAULT;
42871+
42872+ /* do not add nested subjects here, add
42873+ while parsing objects
42874+ */
42875+
42876+ if (s_pre.mode & GR_NESTED) {
42877+ userp = s_pre.prev;
42878+ continue;
42879+ }
42880+
42881+ ret = do_copy_user_subj(userp, role);
42882+
42883+ err = PTR_ERR(ret);
42884+ if (IS_ERR(ret))
42885+ return err;
42886+
42887+ insert_acl_subj_label(ret, role);
42888+
42889+ userp = s_pre.prev;
42890+ }
42891+
42892+ return 0;
42893+}
42894+
42895+static int
42896+copy_user_acl(struct gr_arg *arg)
42897+{
42898+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42899+ struct sprole_pw *sptmp;
42900+ struct gr_hash_struct *ghash;
42901+ uid_t *domainlist;
42902+ unsigned int r_num;
42903+ unsigned int len;
42904+ char *tmp;
42905+ int err = 0;
42906+ __u16 i;
42907+ __u32 num_subjs;
42908+
42909+ /* we need a default and kernel role */
42910+ if (arg->role_db.num_roles < 2)
42911+ return -EINVAL;
42912+
42913+ /* copy special role authentication info from userspace */
42914+
42915+ num_sprole_pws = arg->num_sprole_pws;
42916+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42917+
42918+ if (!acl_special_roles) {
42919+ err = -ENOMEM;
42920+ goto cleanup;
42921+ }
42922+
42923+ for (i = 0; i < num_sprole_pws; i++) {
42924+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42925+ if (!sptmp) {
42926+ err = -ENOMEM;
42927+ goto cleanup;
42928+ }
42929+ if (copy_from_user(sptmp, arg->sprole_pws + i,
42930+ sizeof (struct sprole_pw))) {
42931+ err = -EFAULT;
42932+ goto cleanup;
42933+ }
42934+
42935+ len =
42936+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42937+
42938+ if (!len || len >= GR_SPROLE_LEN) {
42939+ err = -EINVAL;
42940+ goto cleanup;
42941+ }
42942+
42943+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42944+ err = -ENOMEM;
42945+ goto cleanup;
42946+ }
42947+
42948+ if (copy_from_user(tmp, sptmp->rolename, len)) {
42949+ err = -EFAULT;
42950+ goto cleanup;
42951+ }
42952+ tmp[len-1] = '\0';
42953+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42954+ printk(KERN_ALERT "Copying special role %s\n", tmp);
42955+#endif
42956+ sptmp->rolename = tmp;
42957+ acl_special_roles[i] = sptmp;
42958+ }
42959+
42960+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42961+
42962+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42963+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
42964+
42965+ if (!r_tmp) {
42966+ err = -ENOMEM;
42967+ goto cleanup;
42968+ }
42969+
42970+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
42971+ sizeof (struct acl_role_label *))) {
42972+ err = -EFAULT;
42973+ goto cleanup;
42974+ }
42975+
42976+ if (copy_from_user(r_tmp, r_utmp2,
42977+ sizeof (struct acl_role_label))) {
42978+ err = -EFAULT;
42979+ goto cleanup;
42980+ }
42981+
42982+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42983+
42984+ if (!len || len >= PATH_MAX) {
42985+ err = -EINVAL;
42986+ goto cleanup;
42987+ }
42988+
42989+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42990+ err = -ENOMEM;
42991+ goto cleanup;
42992+ }
42993+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
42994+ err = -EFAULT;
42995+ goto cleanup;
42996+ }
42997+ tmp[len-1] = '\0';
42998+ r_tmp->rolename = tmp;
42999+
43000+ if (!strcmp(r_tmp->rolename, "default")
43001+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
43002+ default_role = r_tmp;
43003+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
43004+ kernel_role = r_tmp;
43005+ }
43006+
43007+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
43008+ err = -ENOMEM;
43009+ goto cleanup;
43010+ }
43011+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
43012+ err = -EFAULT;
43013+ goto cleanup;
43014+ }
43015+
43016+ r_tmp->hash = ghash;
43017+
43018+ num_subjs = count_user_subjs(r_tmp->hash->first);
43019+
43020+ r_tmp->subj_hash_size = num_subjs;
43021+ r_tmp->subj_hash =
43022+ (struct acl_subject_label **)
43023+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
43024+
43025+ if (!r_tmp->subj_hash) {
43026+ err = -ENOMEM;
43027+ goto cleanup;
43028+ }
43029+
43030+ err = copy_user_allowedips(r_tmp);
43031+ if (err)
43032+ goto cleanup;
43033+
43034+ /* copy domain info */
43035+ if (r_tmp->domain_children != NULL) {
43036+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
43037+ if (domainlist == NULL) {
43038+ err = -ENOMEM;
43039+ goto cleanup;
43040+ }
43041+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
43042+ err = -EFAULT;
43043+ goto cleanup;
43044+ }
43045+ r_tmp->domain_children = domainlist;
43046+ }
43047+
43048+ err = copy_user_transitions(r_tmp);
43049+ if (err)
43050+ goto cleanup;
43051+
43052+ memset(r_tmp->subj_hash, 0,
43053+ r_tmp->subj_hash_size *
43054+ sizeof (struct acl_subject_label *));
43055+
43056+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
43057+
43058+ if (err)
43059+ goto cleanup;
43060+
43061+ /* set nested subject list to null */
43062+ r_tmp->hash->first = NULL;
43063+
43064+ insert_acl_role_label(r_tmp);
43065+ }
43066+
43067+ goto return_err;
43068+ cleanup:
43069+ free_variables();
43070+ return_err:
43071+ return err;
43072+
43073+}
43074+
43075+static int
43076+gracl_init(struct gr_arg *args)
43077+{
43078+ int error = 0;
43079+
43080+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43081+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43082+
43083+ if (init_variables(args)) {
43084+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43085+ error = -ENOMEM;
43086+ free_variables();
43087+ goto out;
43088+ }
43089+
43090+ error = copy_user_acl(args);
43091+ free_init_variables();
43092+ if (error) {
43093+ free_variables();
43094+ goto out;
43095+ }
43096+
43097+ if ((error = gr_set_acls(0))) {
43098+ free_variables();
43099+ goto out;
43100+ }
43101+
43102+ pax_open_kernel();
43103+ gr_status |= GR_READY;
43104+ pax_close_kernel();
43105+
43106+ out:
43107+ return error;
43108+}
43109+
43110+/* derived from glibc fnmatch() 0: match, 1: no match*/
43111+
43112+static int
43113+glob_match(const char *p, const char *n)
43114+{
43115+ char c;
43116+
43117+ while ((c = *p++) != '\0') {
43118+ switch (c) {
43119+ case '?':
43120+ if (*n == '\0')
43121+ return 1;
43122+ else if (*n == '/')
43123+ return 1;
43124+ break;
43125+ case '\\':
43126+ if (*n != c)
43127+ return 1;
43128+ break;
43129+ case '*':
43130+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
43131+ if (*n == '/')
43132+ return 1;
43133+ else if (c == '?') {
43134+ if (*n == '\0')
43135+ return 1;
43136+ else
43137+ ++n;
43138+ }
43139+ }
43140+ if (c == '\0') {
43141+ return 0;
43142+ } else {
43143+ const char *endp;
43144+
43145+ if ((endp = strchr(n, '/')) == NULL)
43146+ endp = n + strlen(n);
43147+
43148+ if (c == '[') {
43149+ for (--p; n < endp; ++n)
43150+ if (!glob_match(p, n))
43151+ return 0;
43152+ } else if (c == '/') {
43153+ while (*n != '\0' && *n != '/')
43154+ ++n;
43155+ if (*n == '/' && !glob_match(p, n + 1))
43156+ return 0;
43157+ } else {
43158+ for (--p; n < endp; ++n)
43159+ if (*n == c && !glob_match(p, n))
43160+ return 0;
43161+ }
43162+
43163+ return 1;
43164+ }
43165+ case '[':
43166+ {
43167+ int not;
43168+ char cold;
43169+
43170+ if (*n == '\0' || *n == '/')
43171+ return 1;
43172+
43173+ not = (*p == '!' || *p == '^');
43174+ if (not)
43175+ ++p;
43176+
43177+ c = *p++;
43178+ for (;;) {
43179+ unsigned char fn = (unsigned char)*n;
43180+
43181+ if (c == '\0')
43182+ return 1;
43183+ else {
43184+ if (c == fn)
43185+ goto matched;
43186+ cold = c;
43187+ c = *p++;
43188+
43189+ if (c == '-' && *p != ']') {
43190+ unsigned char cend = *p++;
43191+
43192+ if (cend == '\0')
43193+ return 1;
43194+
43195+ if (cold <= fn && fn <= cend)
43196+ goto matched;
43197+
43198+ c = *p++;
43199+ }
43200+ }
43201+
43202+ if (c == ']')
43203+ break;
43204+ }
43205+ if (!not)
43206+ return 1;
43207+ break;
43208+ matched:
43209+ while (c != ']') {
43210+ if (c == '\0')
43211+ return 1;
43212+
43213+ c = *p++;
43214+ }
43215+ if (not)
43216+ return 1;
43217+ }
43218+ break;
43219+ default:
43220+ if (c != *n)
43221+ return 1;
43222+ }
43223+
43224+ ++n;
43225+ }
43226+
43227+ if (*n == '\0')
43228+ return 0;
43229+
43230+ if (*n == '/')
43231+ return 0;
43232+
43233+ return 1;
43234+}
43235+
43236+static struct acl_object_label *
43237+chk_glob_label(struct acl_object_label *globbed,
43238+ struct dentry *dentry, struct vfsmount *mnt, char **path)
43239+{
43240+ struct acl_object_label *tmp;
43241+
43242+ if (*path == NULL)
43243+ *path = gr_to_filename_nolock(dentry, mnt);
43244+
43245+ tmp = globbed;
43246+
43247+ while (tmp) {
43248+ if (!glob_match(tmp->filename, *path))
43249+ return tmp;
43250+ tmp = tmp->next;
43251+ }
43252+
43253+ return NULL;
43254+}
43255+
43256+static struct acl_object_label *
43257+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43258+ const ino_t curr_ino, const dev_t curr_dev,
43259+ const struct acl_subject_label *subj, char **path, const int checkglob)
43260+{
43261+ struct acl_subject_label *tmpsubj;
43262+ struct acl_object_label *retval;
43263+ struct acl_object_label *retval2;
43264+
43265+ tmpsubj = (struct acl_subject_label *) subj;
43266+ read_lock(&gr_inode_lock);
43267+ do {
43268+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43269+ if (retval) {
43270+ if (checkglob && retval->globbed) {
43271+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43272+ (struct vfsmount *)orig_mnt, path);
43273+ if (retval2)
43274+ retval = retval2;
43275+ }
43276+ break;
43277+ }
43278+ } while ((tmpsubj = tmpsubj->parent_subject));
43279+ read_unlock(&gr_inode_lock);
43280+
43281+ return retval;
43282+}
43283+
43284+static __inline__ struct acl_object_label *
43285+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43286+ struct dentry *curr_dentry,
43287+ const struct acl_subject_label *subj, char **path, const int checkglob)
43288+{
43289+ int newglob = checkglob;
43290+ ino_t inode;
43291+ dev_t device;
43292+
43293+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43294+ as we don't want a / * rule to match instead of the / object
43295+ don't do this for create lookups that call this function though, since they're looking up
43296+ on the parent and thus need globbing checks on all paths
43297+ */
43298+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43299+ newglob = GR_NO_GLOB;
43300+
43301+ spin_lock(&curr_dentry->d_lock);
43302+ inode = curr_dentry->d_inode->i_ino;
43303+ device = __get_dev(curr_dentry);
43304+ spin_unlock(&curr_dentry->d_lock);
43305+
43306+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43307+}
43308+
43309+static struct acl_object_label *
43310+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43311+ const struct acl_subject_label *subj, char *path, const int checkglob)
43312+{
43313+ struct dentry *dentry = (struct dentry *) l_dentry;
43314+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43315+ struct acl_object_label *retval;
43316+ struct dentry *parent;
43317+
43318+ write_seqlock(&rename_lock);
43319+ br_read_lock(vfsmount_lock);
43320+
43321+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43322+#ifdef CONFIG_NET
43323+ mnt == sock_mnt ||
43324+#endif
43325+#ifdef CONFIG_HUGETLBFS
43326+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43327+#endif
43328+ /* ignore Eric Biederman */
43329+ IS_PRIVATE(l_dentry->d_inode))) {
43330+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43331+ goto out;
43332+ }
43333+
43334+ for (;;) {
43335+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43336+ break;
43337+
43338+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43339+ if (mnt->mnt_parent == mnt)
43340+ break;
43341+
43342+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43343+ if (retval != NULL)
43344+ goto out;
43345+
43346+ dentry = mnt->mnt_mountpoint;
43347+ mnt = mnt->mnt_parent;
43348+ continue;
43349+ }
43350+
43351+ parent = dentry->d_parent;
43352+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43353+ if (retval != NULL)
43354+ goto out;
43355+
43356+ dentry = parent;
43357+ }
43358+
43359+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43360+
43361+ /* real_root is pinned so we don't have to hold a reference */
43362+ if (retval == NULL)
43363+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43364+out:
43365+ br_read_unlock(vfsmount_lock);
43366+ write_sequnlock(&rename_lock);
43367+
43368+ BUG_ON(retval == NULL);
43369+
43370+ return retval;
43371+}
43372+
43373+static __inline__ struct acl_object_label *
43374+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43375+ const struct acl_subject_label *subj)
43376+{
43377+ char *path = NULL;
43378+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43379+}
43380+
43381+static __inline__ struct acl_object_label *
43382+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43383+ const struct acl_subject_label *subj)
43384+{
43385+ char *path = NULL;
43386+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43387+}
43388+
43389+static __inline__ struct acl_object_label *
43390+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43391+ const struct acl_subject_label *subj, char *path)
43392+{
43393+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43394+}
43395+
43396+static struct acl_subject_label *
43397+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43398+ const struct acl_role_label *role)
43399+{
43400+ struct dentry *dentry = (struct dentry *) l_dentry;
43401+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43402+ struct acl_subject_label *retval;
43403+ struct dentry *parent;
43404+
43405+ write_seqlock(&rename_lock);
43406+ br_read_lock(vfsmount_lock);
43407+
43408+ for (;;) {
43409+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43410+ break;
43411+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43412+ if (mnt->mnt_parent == mnt)
43413+ break;
43414+
43415+ spin_lock(&dentry->d_lock);
43416+ read_lock(&gr_inode_lock);
43417+ retval =
43418+ lookup_acl_subj_label(dentry->d_inode->i_ino,
43419+ __get_dev(dentry), role);
43420+ read_unlock(&gr_inode_lock);
43421+ spin_unlock(&dentry->d_lock);
43422+ if (retval != NULL)
43423+ goto out;
43424+
43425+ dentry = mnt->mnt_mountpoint;
43426+ mnt = mnt->mnt_parent;
43427+ continue;
43428+ }
43429+
43430+ spin_lock(&dentry->d_lock);
43431+ read_lock(&gr_inode_lock);
43432+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43433+ __get_dev(dentry), role);
43434+ read_unlock(&gr_inode_lock);
43435+ parent = dentry->d_parent;
43436+ spin_unlock(&dentry->d_lock);
43437+
43438+ if (retval != NULL)
43439+ goto out;
43440+
43441+ dentry = parent;
43442+ }
43443+
43444+ spin_lock(&dentry->d_lock);
43445+ read_lock(&gr_inode_lock);
43446+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43447+ __get_dev(dentry), role);
43448+ read_unlock(&gr_inode_lock);
43449+ spin_unlock(&dentry->d_lock);
43450+
43451+ if (unlikely(retval == NULL)) {
43452+ /* real_root is pinned, we don't need to hold a reference */
43453+ read_lock(&gr_inode_lock);
43454+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43455+ __get_dev(real_root.dentry), role);
43456+ read_unlock(&gr_inode_lock);
43457+ }
43458+out:
43459+ br_read_unlock(vfsmount_lock);
43460+ write_sequnlock(&rename_lock);
43461+
43462+ BUG_ON(retval == NULL);
43463+
43464+ return retval;
43465+}
43466+
43467+static void
43468+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43469+{
43470+ struct task_struct *task = current;
43471+ const struct cred *cred = current_cred();
43472+
43473+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43474+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43475+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43476+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43477+
43478+ return;
43479+}
43480+
43481+static void
43482+gr_log_learn_sysctl(const char *path, const __u32 mode)
43483+{
43484+ struct task_struct *task = current;
43485+ const struct cred *cred = current_cred();
43486+
43487+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43488+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43489+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43490+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43491+
43492+ return;
43493+}
43494+
43495+static void
43496+gr_log_learn_id_change(const char type, const unsigned int real,
43497+ const unsigned int effective, const unsigned int fs)
43498+{
43499+ struct task_struct *task = current;
43500+ const struct cred *cred = current_cred();
43501+
43502+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43503+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43504+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43505+ type, real, effective, fs, &task->signal->saved_ip);
43506+
43507+ return;
43508+}
43509+
43510+__u32
43511+gr_check_link(const struct dentry * new_dentry,
43512+ const struct dentry * parent_dentry,
43513+ const struct vfsmount * parent_mnt,
43514+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43515+{
43516+ struct acl_object_label *obj;
43517+ __u32 oldmode, newmode;
43518+ __u32 needmode;
43519+
43520+ if (unlikely(!(gr_status & GR_READY)))
43521+ return (GR_CREATE | GR_LINK);
43522+
43523+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43524+ oldmode = obj->mode;
43525+
43526+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43527+ oldmode |= (GR_CREATE | GR_LINK);
43528+
43529+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43530+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43531+ needmode |= GR_SETID | GR_AUDIT_SETID;
43532+
43533+ newmode =
43534+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
43535+ oldmode | needmode);
43536+
43537+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43538+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43539+ GR_INHERIT | GR_AUDIT_INHERIT);
43540+
43541+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43542+ goto bad;
43543+
43544+ if ((oldmode & needmode) != needmode)
43545+ goto bad;
43546+
43547+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43548+ if ((newmode & needmode) != needmode)
43549+ goto bad;
43550+
43551+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43552+ return newmode;
43553+bad:
43554+ needmode = oldmode;
43555+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43556+ needmode |= GR_SETID;
43557+
43558+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43559+ gr_log_learn(old_dentry, old_mnt, needmode);
43560+ return (GR_CREATE | GR_LINK);
43561+ } else if (newmode & GR_SUPPRESS)
43562+ return GR_SUPPRESS;
43563+ else
43564+ return 0;
43565+}
43566+
43567+__u32
43568+gr_search_file(const struct dentry * dentry, const __u32 mode,
43569+ const struct vfsmount * mnt)
43570+{
43571+ __u32 retval = mode;
43572+ struct acl_subject_label *curracl;
43573+ struct acl_object_label *currobj;
43574+
43575+ if (unlikely(!(gr_status & GR_READY)))
43576+ return (mode & ~GR_AUDITS);
43577+
43578+ curracl = current->acl;
43579+
43580+ currobj = chk_obj_label(dentry, mnt, curracl);
43581+ retval = currobj->mode & mode;
43582+
43583+ /* if we're opening a specified transfer file for writing
43584+ (e.g. /dev/initctl), then transfer our role to init
43585+ */
43586+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43587+ current->role->roletype & GR_ROLE_PERSIST)) {
43588+ struct task_struct *task = init_pid_ns.child_reaper;
43589+
43590+ if (task->role != current->role) {
43591+ task->acl_sp_role = 0;
43592+ task->acl_role_id = current->acl_role_id;
43593+ task->role = current->role;
43594+ rcu_read_lock();
43595+ read_lock(&grsec_exec_file_lock);
43596+ gr_apply_subject_to_task(task);
43597+ read_unlock(&grsec_exec_file_lock);
43598+ rcu_read_unlock();
43599+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43600+ }
43601+ }
43602+
43603+ if (unlikely
43604+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43605+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43606+ __u32 new_mode = mode;
43607+
43608+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43609+
43610+ retval = new_mode;
43611+
43612+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43613+ new_mode |= GR_INHERIT;
43614+
43615+ if (!(mode & GR_NOLEARN))
43616+ gr_log_learn(dentry, mnt, new_mode);
43617+ }
43618+
43619+ return retval;
43620+}
43621+
43622+__u32
43623+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43624+ const struct vfsmount * mnt, const __u32 mode)
43625+{
43626+ struct name_entry *match;
43627+ struct acl_object_label *matchpo;
43628+ struct acl_subject_label *curracl;
43629+ char *path;
43630+ __u32 retval;
43631+
43632+ if (unlikely(!(gr_status & GR_READY)))
43633+ return (mode & ~GR_AUDITS);
43634+
43635+ preempt_disable();
43636+ path = gr_to_filename_rbac(new_dentry, mnt);
43637+ match = lookup_name_entry_create(path);
43638+
43639+ if (!match)
43640+ goto check_parent;
43641+
43642+ curracl = current->acl;
43643+
43644+ read_lock(&gr_inode_lock);
43645+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43646+ read_unlock(&gr_inode_lock);
43647+
43648+ if (matchpo) {
43649+ if ((matchpo->mode & mode) !=
43650+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
43651+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43652+ __u32 new_mode = mode;
43653+
43654+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43655+
43656+ gr_log_learn(new_dentry, mnt, new_mode);
43657+
43658+ preempt_enable();
43659+ return new_mode;
43660+ }
43661+ preempt_enable();
43662+ return (matchpo->mode & mode);
43663+ }
43664+
43665+ check_parent:
43666+ curracl = current->acl;
43667+
43668+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43669+ retval = matchpo->mode & mode;
43670+
43671+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43672+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43673+ __u32 new_mode = mode;
43674+
43675+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43676+
43677+ gr_log_learn(new_dentry, mnt, new_mode);
43678+ preempt_enable();
43679+ return new_mode;
43680+ }
43681+
43682+ preempt_enable();
43683+ return retval;
43684+}
43685+
43686+int
43687+gr_check_hidden_task(const struct task_struct *task)
43688+{
43689+ if (unlikely(!(gr_status & GR_READY)))
43690+ return 0;
43691+
43692+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43693+ return 1;
43694+
43695+ return 0;
43696+}
43697+
43698+int
43699+gr_check_protected_task(const struct task_struct *task)
43700+{
43701+ if (unlikely(!(gr_status & GR_READY) || !task))
43702+ return 0;
43703+
43704+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43705+ task->acl != current->acl)
43706+ return 1;
43707+
43708+ return 0;
43709+}
43710+
43711+int
43712+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43713+{
43714+ struct task_struct *p;
43715+ int ret = 0;
43716+
43717+ if (unlikely(!(gr_status & GR_READY) || !pid))
43718+ return ret;
43719+
43720+ read_lock(&tasklist_lock);
43721+ do_each_pid_task(pid, type, p) {
43722+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43723+ p->acl != current->acl) {
43724+ ret = 1;
43725+ goto out;
43726+ }
43727+ } while_each_pid_task(pid, type, p);
43728+out:
43729+ read_unlock(&tasklist_lock);
43730+
43731+ return ret;
43732+}
43733+
43734+void
43735+gr_copy_label(struct task_struct *tsk)
43736+{
43737+ tsk->signal->used_accept = 0;
43738+ tsk->acl_sp_role = 0;
43739+ tsk->acl_role_id = current->acl_role_id;
43740+ tsk->acl = current->acl;
43741+ tsk->role = current->role;
43742+ tsk->signal->curr_ip = current->signal->curr_ip;
43743+ tsk->signal->saved_ip = current->signal->saved_ip;
43744+ if (current->exec_file)
43745+ get_file(current->exec_file);
43746+ tsk->exec_file = current->exec_file;
43747+ tsk->is_writable = current->is_writable;
43748+ if (unlikely(current->signal->used_accept)) {
43749+ current->signal->curr_ip = 0;
43750+ current->signal->saved_ip = 0;
43751+ }
43752+
43753+ return;
43754+}
43755+
43756+static void
43757+gr_set_proc_res(struct task_struct *task)
43758+{
43759+ struct acl_subject_label *proc;
43760+ unsigned short i;
43761+
43762+ proc = task->acl;
43763+
43764+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43765+ return;
43766+
43767+ for (i = 0; i < RLIM_NLIMITS; i++) {
43768+ if (!(proc->resmask & (1 << i)))
43769+ continue;
43770+
43771+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43772+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43773+ }
43774+
43775+ return;
43776+}
43777+
43778+extern int __gr_process_user_ban(struct user_struct *user);
43779+
43780+int
43781+gr_check_user_change(int real, int effective, int fs)
43782+{
43783+ unsigned int i;
43784+ __u16 num;
43785+ uid_t *uidlist;
43786+ int curuid;
43787+ int realok = 0;
43788+ int effectiveok = 0;
43789+ int fsok = 0;
43790+
43791+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43792+ struct user_struct *user;
43793+
43794+ if (real == -1)
43795+ goto skipit;
43796+
43797+ user = find_user(real);
43798+ if (user == NULL)
43799+ goto skipit;
43800+
43801+ if (__gr_process_user_ban(user)) {
43802+ /* for find_user */
43803+ free_uid(user);
43804+ return 1;
43805+ }
43806+
43807+ /* for find_user */
43808+ free_uid(user);
43809+
43810+skipit:
43811+#endif
43812+
43813+ if (unlikely(!(gr_status & GR_READY)))
43814+ return 0;
43815+
43816+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43817+ gr_log_learn_id_change('u', real, effective, fs);
43818+
43819+ num = current->acl->user_trans_num;
43820+ uidlist = current->acl->user_transitions;
43821+
43822+ if (uidlist == NULL)
43823+ return 0;
43824+
43825+ if (real == -1)
43826+ realok = 1;
43827+ if (effective == -1)
43828+ effectiveok = 1;
43829+ if (fs == -1)
43830+ fsok = 1;
43831+
43832+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
43833+ for (i = 0; i < num; i++) {
43834+ curuid = (int)uidlist[i];
43835+ if (real == curuid)
43836+ realok = 1;
43837+ if (effective == curuid)
43838+ effectiveok = 1;
43839+ if (fs == curuid)
43840+ fsok = 1;
43841+ }
43842+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
43843+ for (i = 0; i < num; i++) {
43844+ curuid = (int)uidlist[i];
43845+ if (real == curuid)
43846+ break;
43847+ if (effective == curuid)
43848+ break;
43849+ if (fs == curuid)
43850+ break;
43851+ }
43852+ /* not in deny list */
43853+ if (i == num) {
43854+ realok = 1;
43855+ effectiveok = 1;
43856+ fsok = 1;
43857+ }
43858+ }
43859+
43860+ if (realok && effectiveok && fsok)
43861+ return 0;
43862+ else {
43863+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43864+ return 1;
43865+ }
43866+}
43867+
43868+int
43869+gr_check_group_change(int real, int effective, int fs)
43870+{
43871+ unsigned int i;
43872+ __u16 num;
43873+ gid_t *gidlist;
43874+ int curgid;
43875+ int realok = 0;
43876+ int effectiveok = 0;
43877+ int fsok = 0;
43878+
43879+ if (unlikely(!(gr_status & GR_READY)))
43880+ return 0;
43881+
43882+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43883+ gr_log_learn_id_change('g', real, effective, fs);
43884+
43885+ num = current->acl->group_trans_num;
43886+ gidlist = current->acl->group_transitions;
43887+
43888+ if (gidlist == NULL)
43889+ return 0;
43890+
43891+ if (real == -1)
43892+ realok = 1;
43893+ if (effective == -1)
43894+ effectiveok = 1;
43895+ if (fs == -1)
43896+ fsok = 1;
43897+
43898+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
43899+ for (i = 0; i < num; i++) {
43900+ curgid = (int)gidlist[i];
43901+ if (real == curgid)
43902+ realok = 1;
43903+ if (effective == curgid)
43904+ effectiveok = 1;
43905+ if (fs == curgid)
43906+ fsok = 1;
43907+ }
43908+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
43909+ for (i = 0; i < num; i++) {
43910+ curgid = (int)gidlist[i];
43911+ if (real == curgid)
43912+ break;
43913+ if (effective == curgid)
43914+ break;
43915+ if (fs == curgid)
43916+ break;
43917+ }
43918+ /* not in deny list */
43919+ if (i == num) {
43920+ realok = 1;
43921+ effectiveok = 1;
43922+ fsok = 1;
43923+ }
43924+ }
43925+
43926+ if (realok && effectiveok && fsok)
43927+ return 0;
43928+ else {
43929+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43930+ return 1;
43931+ }
43932+}
43933+
43934+void
43935+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43936+{
43937+ struct acl_role_label *role = task->role;
43938+ struct acl_subject_label *subj = NULL;
43939+ struct acl_object_label *obj;
43940+ struct file *filp;
43941+
43942+ if (unlikely(!(gr_status & GR_READY)))
43943+ return;
43944+
43945+ filp = task->exec_file;
43946+
43947+ /* kernel process, we'll give them the kernel role */
43948+ if (unlikely(!filp)) {
43949+ task->role = kernel_role;
43950+ task->acl = kernel_role->root_label;
43951+ return;
43952+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43953+ role = lookup_acl_role_label(task, uid, gid);
43954+
43955+ /* perform subject lookup in possibly new role
43956+ we can use this result below in the case where role == task->role
43957+ */
43958+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43959+
43960+ /* if we changed uid/gid, but result in the same role
43961+ and are using inheritance, don't lose the inherited subject
43962+ if current subject is other than what normal lookup
43963+ would result in, we arrived via inheritance, don't
43964+ lose subject
43965+ */
43966+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43967+ (subj == task->acl)))
43968+ task->acl = subj;
43969+
43970+ task->role = role;
43971+
43972+ task->is_writable = 0;
43973+
43974+ /* ignore additional mmap checks for processes that are writable
43975+ by the default ACL */
43976+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43977+ if (unlikely(obj->mode & GR_WRITE))
43978+ task->is_writable = 1;
43979+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43980+ if (unlikely(obj->mode & GR_WRITE))
43981+ task->is_writable = 1;
43982+
43983+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43984+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43985+#endif
43986+
43987+ gr_set_proc_res(task);
43988+
43989+ return;
43990+}
43991+
43992+int
43993+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43994+ const int unsafe_share)
43995+{
43996+ struct task_struct *task = current;
43997+ struct acl_subject_label *newacl;
43998+ struct acl_object_label *obj;
43999+ __u32 retmode;
44000+
44001+ if (unlikely(!(gr_status & GR_READY)))
44002+ return 0;
44003+
44004+ newacl = chk_subj_label(dentry, mnt, task->role);
44005+
44006+ task_lock(task);
44007+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
44008+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
44009+ !(task->role->roletype & GR_ROLE_GOD) &&
44010+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
44011+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
44012+ task_unlock(task);
44013+ if (unsafe_share)
44014+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
44015+ else
44016+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
44017+ return -EACCES;
44018+ }
44019+ task_unlock(task);
44020+
44021+ obj = chk_obj_label(dentry, mnt, task->acl);
44022+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
44023+
44024+ if (!(task->acl->mode & GR_INHERITLEARN) &&
44025+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
44026+ if (obj->nested)
44027+ task->acl = obj->nested;
44028+ else
44029+ task->acl = newacl;
44030+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
44031+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
44032+
44033+ task->is_writable = 0;
44034+
44035+ /* ignore additional mmap checks for processes that are writable
44036+ by the default ACL */
44037+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
44038+ if (unlikely(obj->mode & GR_WRITE))
44039+ task->is_writable = 1;
44040+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
44041+ if (unlikely(obj->mode & GR_WRITE))
44042+ task->is_writable = 1;
44043+
44044+ gr_set_proc_res(task);
44045+
44046+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44047+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44048+#endif
44049+ return 0;
44050+}
44051+
44052+/* always called with valid inodev ptr */
44053+static void
44054+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
44055+{
44056+ struct acl_object_label *matchpo;
44057+ struct acl_subject_label *matchps;
44058+ struct acl_subject_label *subj;
44059+ struct acl_role_label *role;
44060+ unsigned int x;
44061+
44062+ FOR_EACH_ROLE_START(role)
44063+ FOR_EACH_SUBJECT_START(role, subj, x)
44064+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
44065+ matchpo->mode |= GR_DELETED;
44066+ FOR_EACH_SUBJECT_END(subj,x)
44067+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
44068+ if (subj->inode == ino && subj->device == dev)
44069+ subj->mode |= GR_DELETED;
44070+ FOR_EACH_NESTED_SUBJECT_END(subj)
44071+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
44072+ matchps->mode |= GR_DELETED;
44073+ FOR_EACH_ROLE_END(role)
44074+
44075+ inodev->nentry->deleted = 1;
44076+
44077+ return;
44078+}
44079+
44080+void
44081+gr_handle_delete(const ino_t ino, const dev_t dev)
44082+{
44083+ struct inodev_entry *inodev;
44084+
44085+ if (unlikely(!(gr_status & GR_READY)))
44086+ return;
44087+
44088+ write_lock(&gr_inode_lock);
44089+ inodev = lookup_inodev_entry(ino, dev);
44090+ if (inodev != NULL)
44091+ do_handle_delete(inodev, ino, dev);
44092+ write_unlock(&gr_inode_lock);
44093+
44094+ return;
44095+}
44096+
44097+static void
44098+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44099+ const ino_t newinode, const dev_t newdevice,
44100+ struct acl_subject_label *subj)
44101+{
44102+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44103+ struct acl_object_label *match;
44104+
44105+ match = subj->obj_hash[index];
44106+
44107+ while (match && (match->inode != oldinode ||
44108+ match->device != olddevice ||
44109+ !(match->mode & GR_DELETED)))
44110+ match = match->next;
44111+
44112+ if (match && (match->inode == oldinode)
44113+ && (match->device == olddevice)
44114+ && (match->mode & GR_DELETED)) {
44115+ if (match->prev == NULL) {
44116+ subj->obj_hash[index] = match->next;
44117+ if (match->next != NULL)
44118+ match->next->prev = NULL;
44119+ } else {
44120+ match->prev->next = match->next;
44121+ if (match->next != NULL)
44122+ match->next->prev = match->prev;
44123+ }
44124+ match->prev = NULL;
44125+ match->next = NULL;
44126+ match->inode = newinode;
44127+ match->device = newdevice;
44128+ match->mode &= ~GR_DELETED;
44129+
44130+ insert_acl_obj_label(match, subj);
44131+ }
44132+
44133+ return;
44134+}
44135+
44136+static void
44137+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44138+ const ino_t newinode, const dev_t newdevice,
44139+ struct acl_role_label *role)
44140+{
44141+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44142+ struct acl_subject_label *match;
44143+
44144+ match = role->subj_hash[index];
44145+
44146+ while (match && (match->inode != oldinode ||
44147+ match->device != olddevice ||
44148+ !(match->mode & GR_DELETED)))
44149+ match = match->next;
44150+
44151+ if (match && (match->inode == oldinode)
44152+ && (match->device == olddevice)
44153+ && (match->mode & GR_DELETED)) {
44154+ if (match->prev == NULL) {
44155+ role->subj_hash[index] = match->next;
44156+ if (match->next != NULL)
44157+ match->next->prev = NULL;
44158+ } else {
44159+ match->prev->next = match->next;
44160+ if (match->next != NULL)
44161+ match->next->prev = match->prev;
44162+ }
44163+ match->prev = NULL;
44164+ match->next = NULL;
44165+ match->inode = newinode;
44166+ match->device = newdevice;
44167+ match->mode &= ~GR_DELETED;
44168+
44169+ insert_acl_subj_label(match, role);
44170+ }
44171+
44172+ return;
44173+}
44174+
44175+static void
44176+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44177+ const ino_t newinode, const dev_t newdevice)
44178+{
44179+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44180+ struct inodev_entry *match;
44181+
44182+ match = inodev_set.i_hash[index];
44183+
44184+ while (match && (match->nentry->inode != oldinode ||
44185+ match->nentry->device != olddevice || !match->nentry->deleted))
44186+ match = match->next;
44187+
44188+ if (match && (match->nentry->inode == oldinode)
44189+ && (match->nentry->device == olddevice) &&
44190+ match->nentry->deleted) {
44191+ if (match->prev == NULL) {
44192+ inodev_set.i_hash[index] = match->next;
44193+ if (match->next != NULL)
44194+ match->next->prev = NULL;
44195+ } else {
44196+ match->prev->next = match->next;
44197+ if (match->next != NULL)
44198+ match->next->prev = match->prev;
44199+ }
44200+ match->prev = NULL;
44201+ match->next = NULL;
44202+ match->nentry->inode = newinode;
44203+ match->nentry->device = newdevice;
44204+ match->nentry->deleted = 0;
44205+
44206+ insert_inodev_entry(match);
44207+ }
44208+
44209+ return;
44210+}
44211+
44212+static void
44213+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44214+ const struct vfsmount *mnt)
44215+{
44216+ struct acl_subject_label *subj;
44217+ struct acl_role_label *role;
44218+ unsigned int x;
44219+ ino_t ino = dentry->d_inode->i_ino;
44220+ dev_t dev = __get_dev(dentry);
44221+
44222+ FOR_EACH_ROLE_START(role)
44223+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44224+
44225+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
44226+ if ((subj->inode == ino) && (subj->device == dev)) {
44227+ subj->inode = ino;
44228+ subj->device = dev;
44229+ }
44230+ FOR_EACH_NESTED_SUBJECT_END(subj)
44231+ FOR_EACH_SUBJECT_START(role, subj, x)
44232+ update_acl_obj_label(matchn->inode, matchn->device,
44233+ ino, dev, subj);
44234+ FOR_EACH_SUBJECT_END(subj,x)
44235+ FOR_EACH_ROLE_END(role)
44236+
44237+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44238+
44239+ return;
44240+}
44241+
44242+void
44243+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44244+{
44245+ struct name_entry *matchn;
44246+
44247+ if (unlikely(!(gr_status & GR_READY)))
44248+ return;
44249+
44250+ preempt_disable();
44251+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44252+
44253+ if (unlikely((unsigned long)matchn)) {
44254+ write_lock(&gr_inode_lock);
44255+ do_handle_create(matchn, dentry, mnt);
44256+ write_unlock(&gr_inode_lock);
44257+ }
44258+ preempt_enable();
44259+
44260+ return;
44261+}
44262+
44263+void
44264+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44265+ struct dentry *old_dentry,
44266+ struct dentry *new_dentry,
44267+ struct vfsmount *mnt, const __u8 replace)
44268+{
44269+ struct name_entry *matchn;
44270+ struct inodev_entry *inodev;
44271+ ino_t old_ino = old_dentry->d_inode->i_ino;
44272+ dev_t old_dev = __get_dev(old_dentry);
44273+
44274+ /* vfs_rename swaps the name and parent link for old_dentry and
44275+ new_dentry
44276+ at this point, old_dentry has the new name, parent link, and inode
44277+ for the renamed file
44278+ if a file is being replaced by a rename, new_dentry has the inode
44279+ and name for the replaced file
44280+ */
44281+
44282+ if (unlikely(!(gr_status & GR_READY)))
44283+ return;
44284+
44285+ preempt_disable();
44286+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44287+
44288+ /* we wouldn't have to check d_inode if it weren't for
44289+ NFS silly-renaming
44290+ */
44291+
44292+ write_lock(&gr_inode_lock);
44293+ if (unlikely(replace && new_dentry->d_inode)) {
44294+ ino_t new_ino = new_dentry->d_inode->i_ino;
44295+ dev_t new_dev = __get_dev(new_dentry);
44296+
44297+ inodev = lookup_inodev_entry(new_ino, new_dev);
44298+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44299+ do_handle_delete(inodev, new_ino, new_dev);
44300+ }
44301+
44302+ inodev = lookup_inodev_entry(old_ino, old_dev);
44303+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44304+ do_handle_delete(inodev, old_ino, old_dev);
44305+
44306+ if (unlikely((unsigned long)matchn))
44307+ do_handle_create(matchn, old_dentry, mnt);
44308+
44309+ write_unlock(&gr_inode_lock);
44310+ preempt_enable();
44311+
44312+ return;
44313+}
44314+
44315+static int
44316+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44317+ unsigned char **sum)
44318+{
44319+ struct acl_role_label *r;
44320+ struct role_allowed_ip *ipp;
44321+ struct role_transition *trans;
44322+ unsigned int i;
44323+ int found = 0;
44324+ u32 curr_ip = current->signal->curr_ip;
44325+
44326+ current->signal->saved_ip = curr_ip;
44327+
44328+ /* check transition table */
44329+
44330+ for (trans = current->role->transitions; trans; trans = trans->next) {
44331+ if (!strcmp(rolename, trans->rolename)) {
44332+ found = 1;
44333+ break;
44334+ }
44335+ }
44336+
44337+ if (!found)
44338+ return 0;
44339+
44340+ /* handle special roles that do not require authentication
44341+ and check ip */
44342+
44343+ FOR_EACH_ROLE_START(r)
44344+ if (!strcmp(rolename, r->rolename) &&
44345+ (r->roletype & GR_ROLE_SPECIAL)) {
44346+ found = 0;
44347+ if (r->allowed_ips != NULL) {
44348+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44349+ if ((ntohl(curr_ip) & ipp->netmask) ==
44350+ (ntohl(ipp->addr) & ipp->netmask))
44351+ found = 1;
44352+ }
44353+ } else
44354+ found = 2;
44355+ if (!found)
44356+ return 0;
44357+
44358+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44359+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44360+ *salt = NULL;
44361+ *sum = NULL;
44362+ return 1;
44363+ }
44364+ }
44365+ FOR_EACH_ROLE_END(r)
44366+
44367+ for (i = 0; i < num_sprole_pws; i++) {
44368+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44369+ *salt = acl_special_roles[i]->salt;
44370+ *sum = acl_special_roles[i]->sum;
44371+ return 1;
44372+ }
44373+ }
44374+
44375+ return 0;
44376+}
44377+
44378+static void
44379+assign_special_role(char *rolename)
44380+{
44381+ struct acl_object_label *obj;
44382+ struct acl_role_label *r;
44383+ struct acl_role_label *assigned = NULL;
44384+ struct task_struct *tsk;
44385+ struct file *filp;
44386+
44387+ FOR_EACH_ROLE_START(r)
44388+ if (!strcmp(rolename, r->rolename) &&
44389+ (r->roletype & GR_ROLE_SPECIAL)) {
44390+ assigned = r;
44391+ break;
44392+ }
44393+ FOR_EACH_ROLE_END(r)
44394+
44395+ if (!assigned)
44396+ return;
44397+
44398+ read_lock(&tasklist_lock);
44399+ read_lock(&grsec_exec_file_lock);
44400+
44401+ tsk = current->real_parent;
44402+ if (tsk == NULL)
44403+ goto out_unlock;
44404+
44405+ filp = tsk->exec_file;
44406+ if (filp == NULL)
44407+ goto out_unlock;
44408+
44409+ tsk->is_writable = 0;
44410+
44411+ tsk->acl_sp_role = 1;
44412+ tsk->acl_role_id = ++acl_sp_role_value;
44413+ tsk->role = assigned;
44414+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44415+
44416+ /* ignore additional mmap checks for processes that are writable
44417+ by the default ACL */
44418+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44419+ if (unlikely(obj->mode & GR_WRITE))
44420+ tsk->is_writable = 1;
44421+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44422+ if (unlikely(obj->mode & GR_WRITE))
44423+ tsk->is_writable = 1;
44424+
44425+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44426+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44427+#endif
44428+
44429+out_unlock:
44430+ read_unlock(&grsec_exec_file_lock);
44431+ read_unlock(&tasklist_lock);
44432+ return;
44433+}
44434+
44435+int gr_check_secure_terminal(struct task_struct *task)
44436+{
44437+ struct task_struct *p, *p2, *p3;
44438+ struct files_struct *files;
44439+ struct fdtable *fdt;
44440+ struct file *our_file = NULL, *file;
44441+ int i;
44442+
44443+ if (task->signal->tty == NULL)
44444+ return 1;
44445+
44446+ files = get_files_struct(task);
44447+ if (files != NULL) {
44448+ rcu_read_lock();
44449+ fdt = files_fdtable(files);
44450+ for (i=0; i < fdt->max_fds; i++) {
44451+ file = fcheck_files(files, i);
44452+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44453+ get_file(file);
44454+ our_file = file;
44455+ }
44456+ }
44457+ rcu_read_unlock();
44458+ put_files_struct(files);
44459+ }
44460+
44461+ if (our_file == NULL)
44462+ return 1;
44463+
44464+ read_lock(&tasklist_lock);
44465+ do_each_thread(p2, p) {
44466+ files = get_files_struct(p);
44467+ if (files == NULL ||
44468+ (p->signal && p->signal->tty == task->signal->tty)) {
44469+ if (files != NULL)
44470+ put_files_struct(files);
44471+ continue;
44472+ }
44473+ rcu_read_lock();
44474+ fdt = files_fdtable(files);
44475+ for (i=0; i < fdt->max_fds; i++) {
44476+ file = fcheck_files(files, i);
44477+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44478+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44479+ p3 = task;
44480+ while (p3->pid > 0) {
44481+ if (p3 == p)
44482+ break;
44483+ p3 = p3->real_parent;
44484+ }
44485+ if (p3 == p)
44486+ break;
44487+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44488+ gr_handle_alertkill(p);
44489+ rcu_read_unlock();
44490+ put_files_struct(files);
44491+ read_unlock(&tasklist_lock);
44492+ fput(our_file);
44493+ return 0;
44494+ }
44495+ }
44496+ rcu_read_unlock();
44497+ put_files_struct(files);
44498+ } while_each_thread(p2, p);
44499+ read_unlock(&tasklist_lock);
44500+
44501+ fput(our_file);
44502+ return 1;
44503+}
44504+
44505+ssize_t
44506+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44507+{
44508+ struct gr_arg_wrapper uwrap;
44509+ unsigned char *sprole_salt = NULL;
44510+ unsigned char *sprole_sum = NULL;
44511+ int error = sizeof (struct gr_arg_wrapper);
44512+ int error2 = 0;
44513+
44514+ mutex_lock(&gr_dev_mutex);
44515+
44516+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44517+ error = -EPERM;
44518+ goto out;
44519+ }
44520+
44521+ if (count != sizeof (struct gr_arg_wrapper)) {
44522+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44523+ error = -EINVAL;
44524+ goto out;
44525+ }
44526+
44527+
44528+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44529+ gr_auth_expires = 0;
44530+ gr_auth_attempts = 0;
44531+ }
44532+
44533+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44534+ error = -EFAULT;
44535+ goto out;
44536+ }
44537+
44538+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44539+ error = -EINVAL;
44540+ goto out;
44541+ }
44542+
44543+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44544+ error = -EFAULT;
44545+ goto out;
44546+ }
44547+
44548+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44549+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44550+ time_after(gr_auth_expires, get_seconds())) {
44551+ error = -EBUSY;
44552+ goto out;
44553+ }
44554+
44555+ /* if non-root trying to do anything other than use a special role,
44556+ do not attempt authentication, do not count towards authentication
44557+ locking
44558+ */
44559+
44560+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44561+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44562+ current_uid()) {
44563+ error = -EPERM;
44564+ goto out;
44565+ }
44566+
44567+ /* ensure pw and special role name are null terminated */
44568+
44569+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44570+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44571+
44572+ /* Okay.
44573+ * We have our enough of the argument structure..(we have yet
44574+ * to copy_from_user the tables themselves) . Copy the tables
44575+ * only if we need them, i.e. for loading operations. */
44576+
44577+ switch (gr_usermode->mode) {
44578+ case GR_STATUS:
44579+ if (gr_status & GR_READY) {
44580+ error = 1;
44581+ if (!gr_check_secure_terminal(current))
44582+ error = 3;
44583+ } else
44584+ error = 2;
44585+ goto out;
44586+ case GR_SHUTDOWN:
44587+ if ((gr_status & GR_READY)
44588+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44589+ pax_open_kernel();
44590+ gr_status &= ~GR_READY;
44591+ pax_close_kernel();
44592+
44593+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44594+ free_variables();
44595+ memset(gr_usermode, 0, sizeof (struct gr_arg));
44596+ memset(gr_system_salt, 0, GR_SALT_LEN);
44597+ memset(gr_system_sum, 0, GR_SHA_LEN);
44598+ } else if (gr_status & GR_READY) {
44599+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44600+ error = -EPERM;
44601+ } else {
44602+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44603+ error = -EAGAIN;
44604+ }
44605+ break;
44606+ case GR_ENABLE:
44607+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44608+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44609+ else {
44610+ if (gr_status & GR_READY)
44611+ error = -EAGAIN;
44612+ else
44613+ error = error2;
44614+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44615+ }
44616+ break;
44617+ case GR_RELOAD:
44618+ if (!(gr_status & GR_READY)) {
44619+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44620+ error = -EAGAIN;
44621+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44622+ preempt_disable();
44623+
44624+ pax_open_kernel();
44625+ gr_status &= ~GR_READY;
44626+ pax_close_kernel();
44627+
44628+ free_variables();
44629+ if (!(error2 = gracl_init(gr_usermode))) {
44630+ preempt_enable();
44631+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44632+ } else {
44633+ preempt_enable();
44634+ error = error2;
44635+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44636+ }
44637+ } else {
44638+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44639+ error = -EPERM;
44640+ }
44641+ break;
44642+ case GR_SEGVMOD:
44643+ if (unlikely(!(gr_status & GR_READY))) {
44644+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44645+ error = -EAGAIN;
44646+ break;
44647+ }
44648+
44649+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44650+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44651+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44652+ struct acl_subject_label *segvacl;
44653+ segvacl =
44654+ lookup_acl_subj_label(gr_usermode->segv_inode,
44655+ gr_usermode->segv_device,
44656+ current->role);
44657+ if (segvacl) {
44658+ segvacl->crashes = 0;
44659+ segvacl->expires = 0;
44660+ }
44661+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44662+ gr_remove_uid(gr_usermode->segv_uid);
44663+ }
44664+ } else {
44665+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44666+ error = -EPERM;
44667+ }
44668+ break;
44669+ case GR_SPROLE:
44670+ case GR_SPROLEPAM:
44671+ if (unlikely(!(gr_status & GR_READY))) {
44672+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44673+ error = -EAGAIN;
44674+ break;
44675+ }
44676+
44677+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44678+ current->role->expires = 0;
44679+ current->role->auth_attempts = 0;
44680+ }
44681+
44682+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44683+ time_after(current->role->expires, get_seconds())) {
44684+ error = -EBUSY;
44685+ goto out;
44686+ }
44687+
44688+ if (lookup_special_role_auth
44689+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44690+ && ((!sprole_salt && !sprole_sum)
44691+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44692+ char *p = "";
44693+ assign_special_role(gr_usermode->sp_role);
44694+ read_lock(&tasklist_lock);
44695+ if (current->real_parent)
44696+ p = current->real_parent->role->rolename;
44697+ read_unlock(&tasklist_lock);
44698+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44699+ p, acl_sp_role_value);
44700+ } else {
44701+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44702+ error = -EPERM;
44703+ if(!(current->role->auth_attempts++))
44704+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44705+
44706+ goto out;
44707+ }
44708+ break;
44709+ case GR_UNSPROLE:
44710+ if (unlikely(!(gr_status & GR_READY))) {
44711+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44712+ error = -EAGAIN;
44713+ break;
44714+ }
44715+
44716+ if (current->role->roletype & GR_ROLE_SPECIAL) {
44717+ char *p = "";
44718+ int i = 0;
44719+
44720+ read_lock(&tasklist_lock);
44721+ if (current->real_parent) {
44722+ p = current->real_parent->role->rolename;
44723+ i = current->real_parent->acl_role_id;
44724+ }
44725+ read_unlock(&tasklist_lock);
44726+
44727+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44728+ gr_set_acls(1);
44729+ } else {
44730+ error = -EPERM;
44731+ goto out;
44732+ }
44733+ break;
44734+ default:
44735+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44736+ error = -EINVAL;
44737+ break;
44738+ }
44739+
44740+ if (error != -EPERM)
44741+ goto out;
44742+
44743+ if(!(gr_auth_attempts++))
44744+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44745+
44746+ out:
44747+ mutex_unlock(&gr_dev_mutex);
44748+ return error;
44749+}
44750+
44751+/* must be called with
44752+ rcu_read_lock();
44753+ read_lock(&tasklist_lock);
44754+ read_lock(&grsec_exec_file_lock);
44755+*/
44756+int gr_apply_subject_to_task(struct task_struct *task)
44757+{
44758+ struct acl_object_label *obj;
44759+ char *tmpname;
44760+ struct acl_subject_label *tmpsubj;
44761+ struct file *filp;
44762+ struct name_entry *nmatch;
44763+
44764+ filp = task->exec_file;
44765+ if (filp == NULL)
44766+ return 0;
44767+
44768+ /* the following is to apply the correct subject
44769+ on binaries running when the RBAC system
44770+ is enabled, when the binaries have been
44771+ replaced or deleted since their execution
44772+ -----
44773+ when the RBAC system starts, the inode/dev
44774+ from exec_file will be one the RBAC system
44775+ is unaware of. It only knows the inode/dev
44776+ of the present file on disk, or the absence
44777+ of it.
44778+ */
44779+ preempt_disable();
44780+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44781+
44782+ nmatch = lookup_name_entry(tmpname);
44783+ preempt_enable();
44784+ tmpsubj = NULL;
44785+ if (nmatch) {
44786+ if (nmatch->deleted)
44787+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44788+ else
44789+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44790+ if (tmpsubj != NULL)
44791+ task->acl = tmpsubj;
44792+ }
44793+ if (tmpsubj == NULL)
44794+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44795+ task->role);
44796+ if (task->acl) {
44797+ task->is_writable = 0;
44798+ /* ignore additional mmap checks for processes that are writable
44799+ by the default ACL */
44800+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44801+ if (unlikely(obj->mode & GR_WRITE))
44802+ task->is_writable = 1;
44803+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44804+ if (unlikely(obj->mode & GR_WRITE))
44805+ task->is_writable = 1;
44806+
44807+ gr_set_proc_res(task);
44808+
44809+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44810+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44811+#endif
44812+ } else {
44813+ return 1;
44814+ }
44815+
44816+ return 0;
44817+}
44818+
44819+int
44820+gr_set_acls(const int type)
44821+{
44822+ struct task_struct *task, *task2;
44823+ struct acl_role_label *role = current->role;
44824+ __u16 acl_role_id = current->acl_role_id;
44825+ const struct cred *cred;
44826+ int ret;
44827+
44828+ rcu_read_lock();
44829+ read_lock(&tasklist_lock);
44830+ read_lock(&grsec_exec_file_lock);
44831+ do_each_thread(task2, task) {
44832+ /* check to see if we're called from the exit handler,
44833+ if so, only replace ACLs that have inherited the admin
44834+ ACL */
44835+
44836+ if (type && (task->role != role ||
44837+ task->acl_role_id != acl_role_id))
44838+ continue;
44839+
44840+ task->acl_role_id = 0;
44841+ task->acl_sp_role = 0;
44842+
44843+ if (task->exec_file) {
44844+ cred = __task_cred(task);
44845+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44846+ ret = gr_apply_subject_to_task(task);
44847+ if (ret) {
44848+ read_unlock(&grsec_exec_file_lock);
44849+ read_unlock(&tasklist_lock);
44850+ rcu_read_unlock();
44851+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44852+ return ret;
44853+ }
44854+ } else {
44855+ // it's a kernel process
44856+ task->role = kernel_role;
44857+ task->acl = kernel_role->root_label;
44858+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44859+ task->acl->mode &= ~GR_PROCFIND;
44860+#endif
44861+ }
44862+ } while_each_thread(task2, task);
44863+ read_unlock(&grsec_exec_file_lock);
44864+ read_unlock(&tasklist_lock);
44865+ rcu_read_unlock();
44866+
44867+ return 0;
44868+}
44869+
44870+void
44871+gr_learn_resource(const struct task_struct *task,
44872+ const int res, const unsigned long wanted, const int gt)
44873+{
44874+ struct acl_subject_label *acl;
44875+ const struct cred *cred;
44876+
44877+ if (unlikely((gr_status & GR_READY) &&
44878+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44879+ goto skip_reslog;
44880+
44881+#ifdef CONFIG_GRKERNSEC_RESLOG
44882+ gr_log_resource(task, res, wanted, gt);
44883+#endif
44884+ skip_reslog:
44885+
44886+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44887+ return;
44888+
44889+ acl = task->acl;
44890+
44891+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44892+ !(acl->resmask & (1 << (unsigned short) res))))
44893+ return;
44894+
44895+ if (wanted >= acl->res[res].rlim_cur) {
44896+ unsigned long res_add;
44897+
44898+ res_add = wanted;
44899+ switch (res) {
44900+ case RLIMIT_CPU:
44901+ res_add += GR_RLIM_CPU_BUMP;
44902+ break;
44903+ case RLIMIT_FSIZE:
44904+ res_add += GR_RLIM_FSIZE_BUMP;
44905+ break;
44906+ case RLIMIT_DATA:
44907+ res_add += GR_RLIM_DATA_BUMP;
44908+ break;
44909+ case RLIMIT_STACK:
44910+ res_add += GR_RLIM_STACK_BUMP;
44911+ break;
44912+ case RLIMIT_CORE:
44913+ res_add += GR_RLIM_CORE_BUMP;
44914+ break;
44915+ case RLIMIT_RSS:
44916+ res_add += GR_RLIM_RSS_BUMP;
44917+ break;
44918+ case RLIMIT_NPROC:
44919+ res_add += GR_RLIM_NPROC_BUMP;
44920+ break;
44921+ case RLIMIT_NOFILE:
44922+ res_add += GR_RLIM_NOFILE_BUMP;
44923+ break;
44924+ case RLIMIT_MEMLOCK:
44925+ res_add += GR_RLIM_MEMLOCK_BUMP;
44926+ break;
44927+ case RLIMIT_AS:
44928+ res_add += GR_RLIM_AS_BUMP;
44929+ break;
44930+ case RLIMIT_LOCKS:
44931+ res_add += GR_RLIM_LOCKS_BUMP;
44932+ break;
44933+ case RLIMIT_SIGPENDING:
44934+ res_add += GR_RLIM_SIGPENDING_BUMP;
44935+ break;
44936+ case RLIMIT_MSGQUEUE:
44937+ res_add += GR_RLIM_MSGQUEUE_BUMP;
44938+ break;
44939+ case RLIMIT_NICE:
44940+ res_add += GR_RLIM_NICE_BUMP;
44941+ break;
44942+ case RLIMIT_RTPRIO:
44943+ res_add += GR_RLIM_RTPRIO_BUMP;
44944+ break;
44945+ case RLIMIT_RTTIME:
44946+ res_add += GR_RLIM_RTTIME_BUMP;
44947+ break;
44948+ }
44949+
44950+ acl->res[res].rlim_cur = res_add;
44951+
44952+ if (wanted > acl->res[res].rlim_max)
44953+ acl->res[res].rlim_max = res_add;
44954+
44955+ /* only log the subject filename, since resource logging is supported for
44956+ single-subject learning only */
44957+ rcu_read_lock();
44958+ cred = __task_cred(task);
44959+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44960+ task->role->roletype, cred->uid, cred->gid, acl->filename,
44961+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44962+ "", (unsigned long) res, &task->signal->saved_ip);
44963+ rcu_read_unlock();
44964+ }
44965+
44966+ return;
44967+}
44968+
44969+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44970+void
44971+pax_set_initial_flags(struct linux_binprm *bprm)
44972+{
44973+ struct task_struct *task = current;
44974+ struct acl_subject_label *proc;
44975+ unsigned long flags;
44976+
44977+ if (unlikely(!(gr_status & GR_READY)))
44978+ return;
44979+
44980+ flags = pax_get_flags(task);
44981+
44982+ proc = task->acl;
44983+
44984+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44985+ flags &= ~MF_PAX_PAGEEXEC;
44986+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44987+ flags &= ~MF_PAX_SEGMEXEC;
44988+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44989+ flags &= ~MF_PAX_RANDMMAP;
44990+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44991+ flags &= ~MF_PAX_EMUTRAMP;
44992+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44993+ flags &= ~MF_PAX_MPROTECT;
44994+
44995+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44996+ flags |= MF_PAX_PAGEEXEC;
44997+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44998+ flags |= MF_PAX_SEGMEXEC;
44999+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
45000+ flags |= MF_PAX_RANDMMAP;
45001+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
45002+ flags |= MF_PAX_EMUTRAMP;
45003+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
45004+ flags |= MF_PAX_MPROTECT;
45005+
45006+ pax_set_flags(task, flags);
45007+
45008+ return;
45009+}
45010+#endif
45011+
45012+#ifdef CONFIG_SYSCTL
45013+/* Eric Biederman likes breaking userland ABI and every inode-based security
45014+ system to save 35kb of memory */
45015+
45016+/* we modify the passed in filename, but adjust it back before returning */
45017+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
45018+{
45019+ struct name_entry *nmatch;
45020+ char *p, *lastp = NULL;
45021+ struct acl_object_label *obj = NULL, *tmp;
45022+ struct acl_subject_label *tmpsubj;
45023+ char c = '\0';
45024+
45025+ read_lock(&gr_inode_lock);
45026+
45027+ p = name + len - 1;
45028+ do {
45029+ nmatch = lookup_name_entry(name);
45030+ if (lastp != NULL)
45031+ *lastp = c;
45032+
45033+ if (nmatch == NULL)
45034+ goto next_component;
45035+ tmpsubj = current->acl;
45036+ do {
45037+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
45038+ if (obj != NULL) {
45039+ tmp = obj->globbed;
45040+ while (tmp) {
45041+ if (!glob_match(tmp->filename, name)) {
45042+ obj = tmp;
45043+ goto found_obj;
45044+ }
45045+ tmp = tmp->next;
45046+ }
45047+ goto found_obj;
45048+ }
45049+ } while ((tmpsubj = tmpsubj->parent_subject));
45050+next_component:
45051+ /* end case */
45052+ if (p == name)
45053+ break;
45054+
45055+ while (*p != '/')
45056+ p--;
45057+ if (p == name)
45058+ lastp = p + 1;
45059+ else {
45060+ lastp = p;
45061+ p--;
45062+ }
45063+ c = *lastp;
45064+ *lastp = '\0';
45065+ } while (1);
45066+found_obj:
45067+ read_unlock(&gr_inode_lock);
45068+ /* obj returned will always be non-null */
45069+ return obj;
45070+}
45071+
45072+/* returns 0 when allowing, non-zero on error
45073+ op of 0 is used for readdir, so we don't log the names of hidden files
45074+*/
45075+__u32
45076+gr_handle_sysctl(const struct ctl_table *table, const int op)
45077+{
45078+ struct ctl_table *tmp;
45079+ const char *proc_sys = "/proc/sys";
45080+ char *path;
45081+ struct acl_object_label *obj;
45082+ unsigned short len = 0, pos = 0, depth = 0, i;
45083+ __u32 err = 0;
45084+ __u32 mode = 0;
45085+
45086+ if (unlikely(!(gr_status & GR_READY)))
45087+ return 0;
45088+
45089+ /* for now, ignore operations on non-sysctl entries if it's not a
45090+ readdir*/
45091+ if (table->child != NULL && op != 0)
45092+ return 0;
45093+
45094+ mode |= GR_FIND;
45095+ /* it's only a read if it's an entry, read on dirs is for readdir */
45096+ if (op & MAY_READ)
45097+ mode |= GR_READ;
45098+ if (op & MAY_WRITE)
45099+ mode |= GR_WRITE;
45100+
45101+ preempt_disable();
45102+
45103+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45104+
45105+ /* it's only a read/write if it's an actual entry, not a dir
45106+ (which are opened for readdir)
45107+ */
45108+
45109+ /* convert the requested sysctl entry into a pathname */
45110+
45111+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45112+ len += strlen(tmp->procname);
45113+ len++;
45114+ depth++;
45115+ }
45116+
45117+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45118+ /* deny */
45119+ goto out;
45120+ }
45121+
45122+ memset(path, 0, PAGE_SIZE);
45123+
45124+ memcpy(path, proc_sys, strlen(proc_sys));
45125+
45126+ pos += strlen(proc_sys);
45127+
45128+ for (; depth > 0; depth--) {
45129+ path[pos] = '/';
45130+ pos++;
45131+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45132+ if (depth == i) {
45133+ memcpy(path + pos, tmp->procname,
45134+ strlen(tmp->procname));
45135+ pos += strlen(tmp->procname);
45136+ }
45137+ i++;
45138+ }
45139+ }
45140+
45141+ obj = gr_lookup_by_name(path, pos);
45142+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45143+
45144+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45145+ ((err & mode) != mode))) {
45146+ __u32 new_mode = mode;
45147+
45148+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45149+
45150+ err = 0;
45151+ gr_log_learn_sysctl(path, new_mode);
45152+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45153+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45154+ err = -ENOENT;
45155+ } else if (!(err & GR_FIND)) {
45156+ err = -ENOENT;
45157+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45158+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45159+ path, (mode & GR_READ) ? " reading" : "",
45160+ (mode & GR_WRITE) ? " writing" : "");
45161+ err = -EACCES;
45162+ } else if ((err & mode) != mode) {
45163+ err = -EACCES;
45164+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45165+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45166+ path, (mode & GR_READ) ? " reading" : "",
45167+ (mode & GR_WRITE) ? " writing" : "");
45168+ err = 0;
45169+ } else
45170+ err = 0;
45171+
45172+ out:
45173+ preempt_enable();
45174+
45175+ return err;
45176+}
45177+#endif
45178+
45179+int
45180+gr_handle_proc_ptrace(struct task_struct *task)
45181+{
45182+ struct file *filp;
45183+ struct task_struct *tmp = task;
45184+ struct task_struct *curtemp = current;
45185+ __u32 retmode;
45186+
45187+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45188+ if (unlikely(!(gr_status & GR_READY)))
45189+ return 0;
45190+#endif
45191+
45192+ read_lock(&tasklist_lock);
45193+ read_lock(&grsec_exec_file_lock);
45194+ filp = task->exec_file;
45195+
45196+ while (tmp->pid > 0) {
45197+ if (tmp == curtemp)
45198+ break;
45199+ tmp = tmp->real_parent;
45200+ }
45201+
45202+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45203+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45204+ read_unlock(&grsec_exec_file_lock);
45205+ read_unlock(&tasklist_lock);
45206+ return 1;
45207+ }
45208+
45209+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45210+ if (!(gr_status & GR_READY)) {
45211+ read_unlock(&grsec_exec_file_lock);
45212+ read_unlock(&tasklist_lock);
45213+ return 0;
45214+ }
45215+#endif
45216+
45217+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45218+ read_unlock(&grsec_exec_file_lock);
45219+ read_unlock(&tasklist_lock);
45220+
45221+ if (retmode & GR_NOPTRACE)
45222+ return 1;
45223+
45224+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45225+ && (current->acl != task->acl || (current->acl != current->role->root_label
45226+ && current->pid != task->pid)))
45227+ return 1;
45228+
45229+ return 0;
45230+}
45231+
45232+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45233+{
45234+ if (unlikely(!(gr_status & GR_READY)))
45235+ return;
45236+
45237+ if (!(current->role->roletype & GR_ROLE_GOD))
45238+ return;
45239+
45240+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45241+ p->role->rolename, gr_task_roletype_to_char(p),
45242+ p->acl->filename);
45243+}
45244+
45245+int
45246+gr_handle_ptrace(struct task_struct *task, const long request)
45247+{
45248+ struct task_struct *tmp = task;
45249+ struct task_struct *curtemp = current;
45250+ __u32 retmode;
45251+
45252+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45253+ if (unlikely(!(gr_status & GR_READY)))
45254+ return 0;
45255+#endif
45256+
45257+ read_lock(&tasklist_lock);
45258+ while (tmp->pid > 0) {
45259+ if (tmp == curtemp)
45260+ break;
45261+ tmp = tmp->real_parent;
45262+ }
45263+
45264+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45265+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45266+ read_unlock(&tasklist_lock);
45267+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45268+ return 1;
45269+ }
45270+ read_unlock(&tasklist_lock);
45271+
45272+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45273+ if (!(gr_status & GR_READY))
45274+ return 0;
45275+#endif
45276+
45277+ read_lock(&grsec_exec_file_lock);
45278+ if (unlikely(!task->exec_file)) {
45279+ read_unlock(&grsec_exec_file_lock);
45280+ return 0;
45281+ }
45282+
45283+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45284+ read_unlock(&grsec_exec_file_lock);
45285+
45286+ if (retmode & GR_NOPTRACE) {
45287+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45288+ return 1;
45289+ }
45290+
45291+ if (retmode & GR_PTRACERD) {
45292+ switch (request) {
45293+ case PTRACE_POKETEXT:
45294+ case PTRACE_POKEDATA:
45295+ case PTRACE_POKEUSR:
45296+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45297+ case PTRACE_SETREGS:
45298+ case PTRACE_SETFPREGS:
45299+#endif
45300+#ifdef CONFIG_X86
45301+ case PTRACE_SETFPXREGS:
45302+#endif
45303+#ifdef CONFIG_ALTIVEC
45304+ case PTRACE_SETVRREGS:
45305+#endif
45306+ return 1;
45307+ default:
45308+ return 0;
45309+ }
45310+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
45311+ !(current->role->roletype & GR_ROLE_GOD) &&
45312+ (current->acl != task->acl)) {
45313+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45314+ return 1;
45315+ }
45316+
45317+ return 0;
45318+}
45319+
45320+static int is_writable_mmap(const struct file *filp)
45321+{
45322+ struct task_struct *task = current;
45323+ struct acl_object_label *obj, *obj2;
45324+
45325+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45326+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45327+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45328+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45329+ task->role->root_label);
45330+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45331+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45332+ return 1;
45333+ }
45334+ }
45335+ return 0;
45336+}
45337+
45338+int
45339+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45340+{
45341+ __u32 mode;
45342+
45343+ if (unlikely(!file || !(prot & PROT_EXEC)))
45344+ return 1;
45345+
45346+ if (is_writable_mmap(file))
45347+ return 0;
45348+
45349+ mode =
45350+ gr_search_file(file->f_path.dentry,
45351+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45352+ file->f_path.mnt);
45353+
45354+ if (!gr_tpe_allow(file))
45355+ return 0;
45356+
45357+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45358+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45359+ return 0;
45360+ } else if (unlikely(!(mode & GR_EXEC))) {
45361+ return 0;
45362+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45363+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45364+ return 1;
45365+ }
45366+
45367+ return 1;
45368+}
45369+
45370+int
45371+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45372+{
45373+ __u32 mode;
45374+
45375+ if (unlikely(!file || !(prot & PROT_EXEC)))
45376+ return 1;
45377+
45378+ if (is_writable_mmap(file))
45379+ return 0;
45380+
45381+ mode =
45382+ gr_search_file(file->f_path.dentry,
45383+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45384+ file->f_path.mnt);
45385+
45386+ if (!gr_tpe_allow(file))
45387+ return 0;
45388+
45389+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45390+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45391+ return 0;
45392+ } else if (unlikely(!(mode & GR_EXEC))) {
45393+ return 0;
45394+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45395+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45396+ return 1;
45397+ }
45398+
45399+ return 1;
45400+}
45401+
45402+void
45403+gr_acl_handle_psacct(struct task_struct *task, const long code)
45404+{
45405+ unsigned long runtime;
45406+ unsigned long cputime;
45407+ unsigned int wday, cday;
45408+ __u8 whr, chr;
45409+ __u8 wmin, cmin;
45410+ __u8 wsec, csec;
45411+ struct timespec timeval;
45412+
45413+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45414+ !(task->acl->mode & GR_PROCACCT)))
45415+ return;
45416+
45417+ do_posix_clock_monotonic_gettime(&timeval);
45418+ runtime = timeval.tv_sec - task->start_time.tv_sec;
45419+ wday = runtime / (3600 * 24);
45420+ runtime -= wday * (3600 * 24);
45421+ whr = runtime / 3600;
45422+ runtime -= whr * 3600;
45423+ wmin = runtime / 60;
45424+ runtime -= wmin * 60;
45425+ wsec = runtime;
45426+
45427+ cputime = (task->utime + task->stime) / HZ;
45428+ cday = cputime / (3600 * 24);
45429+ cputime -= cday * (3600 * 24);
45430+ chr = cputime / 3600;
45431+ cputime -= chr * 3600;
45432+ cmin = cputime / 60;
45433+ cputime -= cmin * 60;
45434+ csec = cputime;
45435+
45436+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45437+
45438+ return;
45439+}
45440+
45441+void gr_set_kernel_label(struct task_struct *task)
45442+{
45443+ if (gr_status & GR_READY) {
45444+ task->role = kernel_role;
45445+ task->acl = kernel_role->root_label;
45446+ }
45447+ return;
45448+}
45449+
45450+#ifdef CONFIG_TASKSTATS
45451+int gr_is_taskstats_denied(int pid)
45452+{
45453+ struct task_struct *task;
45454+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45455+ const struct cred *cred;
45456+#endif
45457+ int ret = 0;
45458+
45459+ /* restrict taskstats viewing to un-chrooted root users
45460+ who have the 'view' subject flag if the RBAC system is enabled
45461+ */
45462+
45463+ rcu_read_lock();
45464+ read_lock(&tasklist_lock);
45465+ task = find_task_by_vpid(pid);
45466+ if (task) {
45467+#ifdef CONFIG_GRKERNSEC_CHROOT
45468+ if (proc_is_chrooted(task))
45469+ ret = -EACCES;
45470+#endif
45471+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45472+ cred = __task_cred(task);
45473+#ifdef CONFIG_GRKERNSEC_PROC_USER
45474+ if (cred->uid != 0)
45475+ ret = -EACCES;
45476+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45477+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45478+ ret = -EACCES;
45479+#endif
45480+#endif
45481+ if (gr_status & GR_READY) {
45482+ if (!(task->acl->mode & GR_VIEW))
45483+ ret = -EACCES;
45484+ }
45485+ } else
45486+ ret = -ENOENT;
45487+
45488+ read_unlock(&tasklist_lock);
45489+ rcu_read_unlock();
45490+
45491+ return ret;
45492+}
45493+#endif
45494+
45495+/* AUXV entries are filled via a descendant of search_binary_handler
45496+ after we've already applied the subject for the target
45497+*/
45498+int gr_acl_enable_at_secure(void)
45499+{
45500+ if (unlikely(!(gr_status & GR_READY)))
45501+ return 0;
45502+
45503+ if (current->acl->mode & GR_ATSECURE)
45504+ return 1;
45505+
45506+ return 0;
45507+}
45508+
45509+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45510+{
45511+ struct task_struct *task = current;
45512+ struct dentry *dentry = file->f_path.dentry;
45513+ struct vfsmount *mnt = file->f_path.mnt;
45514+ struct acl_object_label *obj, *tmp;
45515+ struct acl_subject_label *subj;
45516+ unsigned int bufsize;
45517+ int is_not_root;
45518+ char *path;
45519+ dev_t dev = __get_dev(dentry);
45520+
45521+ if (unlikely(!(gr_status & GR_READY)))
45522+ return 1;
45523+
45524+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45525+ return 1;
45526+
45527+ /* ignore Eric Biederman */
45528+ if (IS_PRIVATE(dentry->d_inode))
45529+ return 1;
45530+
45531+ subj = task->acl;
45532+ do {
45533+ obj = lookup_acl_obj_label(ino, dev, subj);
45534+ if (obj != NULL)
45535+ return (obj->mode & GR_FIND) ? 1 : 0;
45536+ } while ((subj = subj->parent_subject));
45537+
45538+ /* this is purely an optimization since we're looking for an object
45539+ for the directory we're doing a readdir on
45540+ if it's possible for any globbed object to match the entry we're
45541+ filling into the directory, then the object we find here will be
45542+ an anchor point with attached globbed objects
45543+ */
45544+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45545+ if (obj->globbed == NULL)
45546+ return (obj->mode & GR_FIND) ? 1 : 0;
45547+
45548+ is_not_root = ((obj->filename[0] == '/') &&
45549+ (obj->filename[1] == '\0')) ? 0 : 1;
45550+ bufsize = PAGE_SIZE - namelen - is_not_root;
45551+
45552+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
45553+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45554+ return 1;
45555+
45556+ preempt_disable();
45557+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45558+ bufsize);
45559+
45560+ bufsize = strlen(path);
45561+
45562+ /* if base is "/", don't append an additional slash */
45563+ if (is_not_root)
45564+ *(path + bufsize) = '/';
45565+ memcpy(path + bufsize + is_not_root, name, namelen);
45566+ *(path + bufsize + namelen + is_not_root) = '\0';
45567+
45568+ tmp = obj->globbed;
45569+ while (tmp) {
45570+ if (!glob_match(tmp->filename, path)) {
45571+ preempt_enable();
45572+ return (tmp->mode & GR_FIND) ? 1 : 0;
45573+ }
45574+ tmp = tmp->next;
45575+ }
45576+ preempt_enable();
45577+ return (obj->mode & GR_FIND) ? 1 : 0;
45578+}
45579+
45580+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45581+EXPORT_SYMBOL(gr_acl_is_enabled);
45582+#endif
45583+EXPORT_SYMBOL(gr_learn_resource);
45584+EXPORT_SYMBOL(gr_set_kernel_label);
45585+#ifdef CONFIG_SECURITY
45586+EXPORT_SYMBOL(gr_check_user_change);
45587+EXPORT_SYMBOL(gr_check_group_change);
45588+#endif
45589+
45590diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45591--- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45592+++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45593@@ -0,0 +1,139 @@
45594+#include <linux/kernel.h>
45595+#include <linux/module.h>
45596+#include <linux/sched.h>
45597+#include <linux/gracl.h>
45598+#include <linux/grsecurity.h>
45599+#include <linux/grinternal.h>
45600+
45601+static const char *captab_log[] = {
45602+ "CAP_CHOWN",
45603+ "CAP_DAC_OVERRIDE",
45604+ "CAP_DAC_READ_SEARCH",
45605+ "CAP_FOWNER",
45606+ "CAP_FSETID",
45607+ "CAP_KILL",
45608+ "CAP_SETGID",
45609+ "CAP_SETUID",
45610+ "CAP_SETPCAP",
45611+ "CAP_LINUX_IMMUTABLE",
45612+ "CAP_NET_BIND_SERVICE",
45613+ "CAP_NET_BROADCAST",
45614+ "CAP_NET_ADMIN",
45615+ "CAP_NET_RAW",
45616+ "CAP_IPC_LOCK",
45617+ "CAP_IPC_OWNER",
45618+ "CAP_SYS_MODULE",
45619+ "CAP_SYS_RAWIO",
45620+ "CAP_SYS_CHROOT",
45621+ "CAP_SYS_PTRACE",
45622+ "CAP_SYS_PACCT",
45623+ "CAP_SYS_ADMIN",
45624+ "CAP_SYS_BOOT",
45625+ "CAP_SYS_NICE",
45626+ "CAP_SYS_RESOURCE",
45627+ "CAP_SYS_TIME",
45628+ "CAP_SYS_TTY_CONFIG",
45629+ "CAP_MKNOD",
45630+ "CAP_LEASE",
45631+ "CAP_AUDIT_WRITE",
45632+ "CAP_AUDIT_CONTROL",
45633+ "CAP_SETFCAP",
45634+ "CAP_MAC_OVERRIDE",
45635+ "CAP_MAC_ADMIN",
45636+ "CAP_SYSLOG"
45637+};
45638+
45639+EXPORT_SYMBOL(gr_is_capable);
45640+EXPORT_SYMBOL(gr_is_capable_nolog);
45641+
45642+int
45643+gr_is_capable(const int cap)
45644+{
45645+ struct task_struct *task = current;
45646+ const struct cred *cred = current_cred();
45647+ struct acl_subject_label *curracl;
45648+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45649+ kernel_cap_t cap_audit = __cap_empty_set;
45650+
45651+ if (!gr_acl_is_enabled())
45652+ return 1;
45653+
45654+ curracl = task->acl;
45655+
45656+ cap_drop = curracl->cap_lower;
45657+ cap_mask = curracl->cap_mask;
45658+ cap_audit = curracl->cap_invert_audit;
45659+
45660+ while ((curracl = curracl->parent_subject)) {
45661+ /* if the cap isn't specified in the current computed mask but is specified in the
45662+ current level subject, and is lowered in the current level subject, then add
45663+ it to the set of dropped capabilities
45664+ otherwise, add the current level subject's mask to the current computed mask
45665+ */
45666+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45667+ cap_raise(cap_mask, cap);
45668+ if (cap_raised(curracl->cap_lower, cap))
45669+ cap_raise(cap_drop, cap);
45670+ if (cap_raised(curracl->cap_invert_audit, cap))
45671+ cap_raise(cap_audit, cap);
45672+ }
45673+ }
45674+
45675+ if (!cap_raised(cap_drop, cap)) {
45676+ if (cap_raised(cap_audit, cap))
45677+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45678+ return 1;
45679+ }
45680+
45681+ curracl = task->acl;
45682+
45683+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45684+ && cap_raised(cred->cap_effective, cap)) {
45685+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45686+ task->role->roletype, cred->uid,
45687+ cred->gid, task->exec_file ?
45688+ gr_to_filename(task->exec_file->f_path.dentry,
45689+ task->exec_file->f_path.mnt) : curracl->filename,
45690+ curracl->filename, 0UL,
45691+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45692+ return 1;
45693+ }
45694+
45695+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45696+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45697+ return 0;
45698+}
45699+
45700+int
45701+gr_is_capable_nolog(const int cap)
45702+{
45703+ struct acl_subject_label *curracl;
45704+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45705+
45706+ if (!gr_acl_is_enabled())
45707+ return 1;
45708+
45709+ curracl = current->acl;
45710+
45711+ cap_drop = curracl->cap_lower;
45712+ cap_mask = curracl->cap_mask;
45713+
45714+ while ((curracl = curracl->parent_subject)) {
45715+ /* if the cap isn't specified in the current computed mask but is specified in the
45716+ current level subject, and is lowered in the current level subject, then add
45717+ it to the set of dropped capabilities
45718+ otherwise, add the current level subject's mask to the current computed mask
45719+ */
45720+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45721+ cap_raise(cap_mask, cap);
45722+ if (cap_raised(curracl->cap_lower, cap))
45723+ cap_raise(cap_drop, cap);
45724+ }
45725+ }
45726+
45727+ if (!cap_raised(cap_drop, cap))
45728+ return 1;
45729+
45730+ return 0;
45731+}
45732+
45733diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45734--- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45735+++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45736@@ -0,0 +1,431 @@
45737+#include <linux/kernel.h>
45738+#include <linux/sched.h>
45739+#include <linux/types.h>
45740+#include <linux/fs.h>
45741+#include <linux/file.h>
45742+#include <linux/stat.h>
45743+#include <linux/grsecurity.h>
45744+#include <linux/grinternal.h>
45745+#include <linux/gracl.h>
45746+
45747+__u32
45748+gr_acl_handle_hidden_file(const struct dentry * dentry,
45749+ const struct vfsmount * mnt)
45750+{
45751+ __u32 mode;
45752+
45753+ if (unlikely(!dentry->d_inode))
45754+ return GR_FIND;
45755+
45756+ mode =
45757+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45758+
45759+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45760+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45761+ return mode;
45762+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45763+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45764+ return 0;
45765+ } else if (unlikely(!(mode & GR_FIND)))
45766+ return 0;
45767+
45768+ return GR_FIND;
45769+}
45770+
45771+__u32
45772+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45773+ const int fmode)
45774+{
45775+ __u32 reqmode = GR_FIND;
45776+ __u32 mode;
45777+
45778+ if (unlikely(!dentry->d_inode))
45779+ return reqmode;
45780+
45781+ if (unlikely(fmode & O_APPEND))
45782+ reqmode |= GR_APPEND;
45783+ else if (unlikely(fmode & FMODE_WRITE))
45784+ reqmode |= GR_WRITE;
45785+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45786+ reqmode |= GR_READ;
45787+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45788+ reqmode &= ~GR_READ;
45789+ mode =
45790+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45791+ mnt);
45792+
45793+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45794+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45795+ reqmode & GR_READ ? " reading" : "",
45796+ reqmode & GR_WRITE ? " writing" : reqmode &
45797+ GR_APPEND ? " appending" : "");
45798+ return reqmode;
45799+ } else
45800+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45801+ {
45802+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45803+ reqmode & GR_READ ? " reading" : "",
45804+ reqmode & GR_WRITE ? " writing" : reqmode &
45805+ GR_APPEND ? " appending" : "");
45806+ return 0;
45807+ } else if (unlikely((mode & reqmode) != reqmode))
45808+ return 0;
45809+
45810+ return reqmode;
45811+}
45812+
45813+__u32
45814+gr_acl_handle_creat(const struct dentry * dentry,
45815+ const struct dentry * p_dentry,
45816+ const struct vfsmount * p_mnt, const int fmode,
45817+ const int imode)
45818+{
45819+ __u32 reqmode = GR_WRITE | GR_CREATE;
45820+ __u32 mode;
45821+
45822+ if (unlikely(fmode & O_APPEND))
45823+ reqmode |= GR_APPEND;
45824+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45825+ reqmode |= GR_READ;
45826+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45827+ reqmode |= GR_SETID;
45828+
45829+ mode =
45830+ gr_check_create(dentry, p_dentry, p_mnt,
45831+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45832+
45833+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45834+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45835+ reqmode & GR_READ ? " reading" : "",
45836+ reqmode & GR_WRITE ? " writing" : reqmode &
45837+ GR_APPEND ? " appending" : "");
45838+ return reqmode;
45839+ } else
45840+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45841+ {
45842+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45843+ reqmode & GR_READ ? " reading" : "",
45844+ reqmode & GR_WRITE ? " writing" : reqmode &
45845+ GR_APPEND ? " appending" : "");
45846+ return 0;
45847+ } else if (unlikely((mode & reqmode) != reqmode))
45848+ return 0;
45849+
45850+ return reqmode;
45851+}
45852+
45853+__u32
45854+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45855+ const int fmode)
45856+{
45857+ __u32 mode, reqmode = GR_FIND;
45858+
45859+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45860+ reqmode |= GR_EXEC;
45861+ if (fmode & S_IWOTH)
45862+ reqmode |= GR_WRITE;
45863+ if (fmode & S_IROTH)
45864+ reqmode |= GR_READ;
45865+
45866+ mode =
45867+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45868+ mnt);
45869+
45870+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45871+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45872+ reqmode & GR_READ ? " reading" : "",
45873+ reqmode & GR_WRITE ? " writing" : "",
45874+ reqmode & GR_EXEC ? " executing" : "");
45875+ return reqmode;
45876+ } else
45877+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45878+ {
45879+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45880+ reqmode & GR_READ ? " reading" : "",
45881+ reqmode & GR_WRITE ? " writing" : "",
45882+ reqmode & GR_EXEC ? " executing" : "");
45883+ return 0;
45884+ } else if (unlikely((mode & reqmode) != reqmode))
45885+ return 0;
45886+
45887+ return reqmode;
45888+}
45889+
45890+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45891+{
45892+ __u32 mode;
45893+
45894+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45895+
45896+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45897+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45898+ return mode;
45899+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45900+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45901+ return 0;
45902+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45903+ return 0;
45904+
45905+ return (reqmode);
45906+}
45907+
45908+__u32
45909+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45910+{
45911+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45912+}
45913+
45914+__u32
45915+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45916+{
45917+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45918+}
45919+
45920+__u32
45921+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45922+{
45923+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45924+}
45925+
45926+__u32
45927+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45928+{
45929+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45930+}
45931+
45932+__u32
45933+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45934+ mode_t mode)
45935+{
45936+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45937+ return 1;
45938+
45939+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45940+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45941+ GR_FCHMOD_ACL_MSG);
45942+ } else {
45943+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45944+ }
45945+}
45946+
45947+__u32
45948+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45949+ mode_t mode)
45950+{
45951+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45952+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45953+ GR_CHMOD_ACL_MSG);
45954+ } else {
45955+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45956+ }
45957+}
45958+
45959+__u32
45960+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45961+{
45962+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45963+}
45964+
45965+__u32
45966+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45967+{
45968+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45969+}
45970+
45971+__u32
45972+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45973+{
45974+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45975+}
45976+
45977+__u32
45978+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45979+{
45980+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45981+ GR_UNIXCONNECT_ACL_MSG);
45982+}
45983+
45984+/* hardlinks require at minimum create permission,
45985+ any additional privilege required is based on the
45986+ privilege of the file being linked to
45987+*/
45988+__u32
45989+gr_acl_handle_link(const struct dentry * new_dentry,
45990+ const struct dentry * parent_dentry,
45991+ const struct vfsmount * parent_mnt,
45992+ const struct dentry * old_dentry,
45993+ const struct vfsmount * old_mnt, const char *to)
45994+{
45995+ __u32 mode;
45996+ __u32 needmode = GR_CREATE | GR_LINK;
45997+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45998+
45999+ mode =
46000+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
46001+ old_mnt);
46002+
46003+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
46004+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46005+ return mode;
46006+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46007+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46008+ return 0;
46009+ } else if (unlikely((mode & needmode) != needmode))
46010+ return 0;
46011+
46012+ return 1;
46013+}
46014+
46015+__u32
46016+gr_acl_handle_symlink(const struct dentry * new_dentry,
46017+ const struct dentry * parent_dentry,
46018+ const struct vfsmount * parent_mnt, const char *from)
46019+{
46020+ __u32 needmode = GR_WRITE | GR_CREATE;
46021+ __u32 mode;
46022+
46023+ mode =
46024+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
46025+ GR_CREATE | GR_AUDIT_CREATE |
46026+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
46027+
46028+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
46029+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46030+ return mode;
46031+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46032+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46033+ return 0;
46034+ } else if (unlikely((mode & needmode) != needmode))
46035+ return 0;
46036+
46037+ return (GR_WRITE | GR_CREATE);
46038+}
46039+
46040+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
46041+{
46042+ __u32 mode;
46043+
46044+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
46045+
46046+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
46047+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
46048+ return mode;
46049+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
46050+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
46051+ return 0;
46052+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
46053+ return 0;
46054+
46055+ return (reqmode);
46056+}
46057+
46058+__u32
46059+gr_acl_handle_mknod(const struct dentry * new_dentry,
46060+ const struct dentry * parent_dentry,
46061+ const struct vfsmount * parent_mnt,
46062+ const int mode)
46063+{
46064+ __u32 reqmode = GR_WRITE | GR_CREATE;
46065+ if (unlikely(mode & (S_ISUID | S_ISGID)))
46066+ reqmode |= GR_SETID;
46067+
46068+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46069+ reqmode, GR_MKNOD_ACL_MSG);
46070+}
46071+
46072+__u32
46073+gr_acl_handle_mkdir(const struct dentry *new_dentry,
46074+ const struct dentry *parent_dentry,
46075+ const struct vfsmount *parent_mnt)
46076+{
46077+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46078+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
46079+}
46080+
46081+#define RENAME_CHECK_SUCCESS(old, new) \
46082+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46083+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46084+
46085+int
46086+gr_acl_handle_rename(struct dentry *new_dentry,
46087+ struct dentry *parent_dentry,
46088+ const struct vfsmount *parent_mnt,
46089+ struct dentry *old_dentry,
46090+ struct inode *old_parent_inode,
46091+ struct vfsmount *old_mnt, const char *newname)
46092+{
46093+ __u32 comp1, comp2;
46094+ int error = 0;
46095+
46096+ if (unlikely(!gr_acl_is_enabled()))
46097+ return 0;
46098+
46099+ if (!new_dentry->d_inode) {
46100+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46101+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46102+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46103+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46104+ GR_DELETE | GR_AUDIT_DELETE |
46105+ GR_AUDIT_READ | GR_AUDIT_WRITE |
46106+ GR_SUPPRESS, old_mnt);
46107+ } else {
46108+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46109+ GR_CREATE | GR_DELETE |
46110+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46111+ GR_AUDIT_READ | GR_AUDIT_WRITE |
46112+ GR_SUPPRESS, parent_mnt);
46113+ comp2 =
46114+ gr_search_file(old_dentry,
46115+ GR_READ | GR_WRITE | GR_AUDIT_READ |
46116+ GR_DELETE | GR_AUDIT_DELETE |
46117+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46118+ }
46119+
46120+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46121+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46122+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46123+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46124+ && !(comp2 & GR_SUPPRESS)) {
46125+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46126+ error = -EACCES;
46127+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46128+ error = -EACCES;
46129+
46130+ return error;
46131+}
46132+
46133+void
46134+gr_acl_handle_exit(void)
46135+{
46136+ u16 id;
46137+ char *rolename;
46138+ struct file *exec_file;
46139+
46140+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46141+ !(current->role->roletype & GR_ROLE_PERSIST))) {
46142+ id = current->acl_role_id;
46143+ rolename = current->role->rolename;
46144+ gr_set_acls(1);
46145+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46146+ }
46147+
46148+ write_lock(&grsec_exec_file_lock);
46149+ exec_file = current->exec_file;
46150+ current->exec_file = NULL;
46151+ write_unlock(&grsec_exec_file_lock);
46152+
46153+ if (exec_file)
46154+ fput(exec_file);
46155+}
46156+
46157+int
46158+gr_acl_handle_procpidmem(const struct task_struct *task)
46159+{
46160+ if (unlikely(!gr_acl_is_enabled()))
46161+ return 0;
46162+
46163+ if (task != current && task->acl->mode & GR_PROTPROCFD)
46164+ return -EACCES;
46165+
46166+ return 0;
46167+}
46168diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46169--- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46170+++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46171@@ -0,0 +1,381 @@
46172+#include <linux/kernel.h>
46173+#include <asm/uaccess.h>
46174+#include <asm/errno.h>
46175+#include <net/sock.h>
46176+#include <linux/file.h>
46177+#include <linux/fs.h>
46178+#include <linux/net.h>
46179+#include <linux/in.h>
46180+#include <linux/skbuff.h>
46181+#include <linux/ip.h>
46182+#include <linux/udp.h>
46183+#include <linux/types.h>
46184+#include <linux/sched.h>
46185+#include <linux/netdevice.h>
46186+#include <linux/inetdevice.h>
46187+#include <linux/gracl.h>
46188+#include <linux/grsecurity.h>
46189+#include <linux/grinternal.h>
46190+
46191+#define GR_BIND 0x01
46192+#define GR_CONNECT 0x02
46193+#define GR_INVERT 0x04
46194+#define GR_BINDOVERRIDE 0x08
46195+#define GR_CONNECTOVERRIDE 0x10
46196+#define GR_SOCK_FAMILY 0x20
46197+
46198+static const char * gr_protocols[IPPROTO_MAX] = {
46199+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46200+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46201+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46202+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46203+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46204+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46205+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46206+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46207+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46208+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46209+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46210+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46211+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46212+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46213+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46214+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46215+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46216+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46217+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46218+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46219+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46220+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46221+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46222+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46223+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46224+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46225+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46226+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46227+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46228+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46229+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46230+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46231+ };
46232+
46233+static const char * gr_socktypes[SOCK_MAX] = {
46234+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46235+ "unknown:7", "unknown:8", "unknown:9", "packet"
46236+ };
46237+
46238+static const char * gr_sockfamilies[AF_MAX+1] = {
46239+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46240+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46241+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46242+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46243+ };
46244+
46245+const char *
46246+gr_proto_to_name(unsigned char proto)
46247+{
46248+ return gr_protocols[proto];
46249+}
46250+
46251+const char *
46252+gr_socktype_to_name(unsigned char type)
46253+{
46254+ return gr_socktypes[type];
46255+}
46256+
46257+const char *
46258+gr_sockfamily_to_name(unsigned char family)
46259+{
46260+ return gr_sockfamilies[family];
46261+}
46262+
46263+int
46264+gr_search_socket(const int domain, const int type, const int protocol)
46265+{
46266+ struct acl_subject_label *curr;
46267+ const struct cred *cred = current_cred();
46268+
46269+ if (unlikely(!gr_acl_is_enabled()))
46270+ goto exit;
46271+
46272+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
46273+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46274+ goto exit; // let the kernel handle it
46275+
46276+ curr = current->acl;
46277+
46278+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46279+ /* the family is allowed, if this is PF_INET allow it only if
46280+ the extra sock type/protocol checks pass */
46281+ if (domain == PF_INET)
46282+ goto inet_check;
46283+ goto exit;
46284+ } else {
46285+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46286+ __u32 fakeip = 0;
46287+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46288+ current->role->roletype, cred->uid,
46289+ cred->gid, current->exec_file ?
46290+ gr_to_filename(current->exec_file->f_path.dentry,
46291+ current->exec_file->f_path.mnt) :
46292+ curr->filename, curr->filename,
46293+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46294+ &current->signal->saved_ip);
46295+ goto exit;
46296+ }
46297+ goto exit_fail;
46298+ }
46299+
46300+inet_check:
46301+ /* the rest of this checking is for IPv4 only */
46302+ if (!curr->ips)
46303+ goto exit;
46304+
46305+ if ((curr->ip_type & (1 << type)) &&
46306+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46307+ goto exit;
46308+
46309+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46310+ /* we don't place acls on raw sockets , and sometimes
46311+ dgram/ip sockets are opened for ioctl and not
46312+ bind/connect, so we'll fake a bind learn log */
46313+ if (type == SOCK_RAW || type == SOCK_PACKET) {
46314+ __u32 fakeip = 0;
46315+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46316+ current->role->roletype, cred->uid,
46317+ cred->gid, current->exec_file ?
46318+ gr_to_filename(current->exec_file->f_path.dentry,
46319+ current->exec_file->f_path.mnt) :
46320+ curr->filename, curr->filename,
46321+ &fakeip, 0, type,
46322+ protocol, GR_CONNECT, &current->signal->saved_ip);
46323+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46324+ __u32 fakeip = 0;
46325+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46326+ current->role->roletype, cred->uid,
46327+ cred->gid, current->exec_file ?
46328+ gr_to_filename(current->exec_file->f_path.dentry,
46329+ current->exec_file->f_path.mnt) :
46330+ curr->filename, curr->filename,
46331+ &fakeip, 0, type,
46332+ protocol, GR_BIND, &current->signal->saved_ip);
46333+ }
46334+ /* we'll log when they use connect or bind */
46335+ goto exit;
46336+ }
46337+
46338+exit_fail:
46339+ if (domain == PF_INET)
46340+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46341+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
46342+ else
46343+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46344+ gr_socktype_to_name(type), protocol);
46345+
46346+ return 0;
46347+exit:
46348+ return 1;
46349+}
46350+
46351+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46352+{
46353+ if ((ip->mode & mode) &&
46354+ (ip_port >= ip->low) &&
46355+ (ip_port <= ip->high) &&
46356+ ((ntohl(ip_addr) & our_netmask) ==
46357+ (ntohl(our_addr) & our_netmask))
46358+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46359+ && (ip->type & (1 << type))) {
46360+ if (ip->mode & GR_INVERT)
46361+ return 2; // specifically denied
46362+ else
46363+ return 1; // allowed
46364+ }
46365+
46366+ return 0; // not specifically allowed, may continue parsing
46367+}
46368+
46369+static int
46370+gr_search_connectbind(const int full_mode, struct sock *sk,
46371+ struct sockaddr_in *addr, const int type)
46372+{
46373+ char iface[IFNAMSIZ] = {0};
46374+ struct acl_subject_label *curr;
46375+ struct acl_ip_label *ip;
46376+ struct inet_sock *isk;
46377+ struct net_device *dev;
46378+ struct in_device *idev;
46379+ unsigned long i;
46380+ int ret;
46381+ int mode = full_mode & (GR_BIND | GR_CONNECT);
46382+ __u32 ip_addr = 0;
46383+ __u32 our_addr;
46384+ __u32 our_netmask;
46385+ char *p;
46386+ __u16 ip_port = 0;
46387+ const struct cred *cred = current_cred();
46388+
46389+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46390+ return 0;
46391+
46392+ curr = current->acl;
46393+ isk = inet_sk(sk);
46394+
46395+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46396+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46397+ addr->sin_addr.s_addr = curr->inaddr_any_override;
46398+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46399+ struct sockaddr_in saddr;
46400+ int err;
46401+
46402+ saddr.sin_family = AF_INET;
46403+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
46404+ saddr.sin_port = isk->inet_sport;
46405+
46406+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46407+ if (err)
46408+ return err;
46409+
46410+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46411+ if (err)
46412+ return err;
46413+ }
46414+
46415+ if (!curr->ips)
46416+ return 0;
46417+
46418+ ip_addr = addr->sin_addr.s_addr;
46419+ ip_port = ntohs(addr->sin_port);
46420+
46421+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46422+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46423+ current->role->roletype, cred->uid,
46424+ cred->gid, current->exec_file ?
46425+ gr_to_filename(current->exec_file->f_path.dentry,
46426+ current->exec_file->f_path.mnt) :
46427+ curr->filename, curr->filename,
46428+ &ip_addr, ip_port, type,
46429+ sk->sk_protocol, mode, &current->signal->saved_ip);
46430+ return 0;
46431+ }
46432+
46433+ for (i = 0; i < curr->ip_num; i++) {
46434+ ip = *(curr->ips + i);
46435+ if (ip->iface != NULL) {
46436+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
46437+ p = strchr(iface, ':');
46438+ if (p != NULL)
46439+ *p = '\0';
46440+ dev = dev_get_by_name(sock_net(sk), iface);
46441+ if (dev == NULL)
46442+ continue;
46443+ idev = in_dev_get(dev);
46444+ if (idev == NULL) {
46445+ dev_put(dev);
46446+ continue;
46447+ }
46448+ rcu_read_lock();
46449+ for_ifa(idev) {
46450+ if (!strcmp(ip->iface, ifa->ifa_label)) {
46451+ our_addr = ifa->ifa_address;
46452+ our_netmask = 0xffffffff;
46453+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46454+ if (ret == 1) {
46455+ rcu_read_unlock();
46456+ in_dev_put(idev);
46457+ dev_put(dev);
46458+ return 0;
46459+ } else if (ret == 2) {
46460+ rcu_read_unlock();
46461+ in_dev_put(idev);
46462+ dev_put(dev);
46463+ goto denied;
46464+ }
46465+ }
46466+ } endfor_ifa(idev);
46467+ rcu_read_unlock();
46468+ in_dev_put(idev);
46469+ dev_put(dev);
46470+ } else {
46471+ our_addr = ip->addr;
46472+ our_netmask = ip->netmask;
46473+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46474+ if (ret == 1)
46475+ return 0;
46476+ else if (ret == 2)
46477+ goto denied;
46478+ }
46479+ }
46480+
46481+denied:
46482+ if (mode == GR_BIND)
46483+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46484+ else if (mode == GR_CONNECT)
46485+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46486+
46487+ return -EACCES;
46488+}
46489+
46490+int
46491+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46492+{
46493+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46494+}
46495+
46496+int
46497+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46498+{
46499+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46500+}
46501+
46502+int gr_search_listen(struct socket *sock)
46503+{
46504+ struct sock *sk = sock->sk;
46505+ struct sockaddr_in addr;
46506+
46507+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46508+ addr.sin_port = inet_sk(sk)->inet_sport;
46509+
46510+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46511+}
46512+
46513+int gr_search_accept(struct socket *sock)
46514+{
46515+ struct sock *sk = sock->sk;
46516+ struct sockaddr_in addr;
46517+
46518+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46519+ addr.sin_port = inet_sk(sk)->inet_sport;
46520+
46521+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46522+}
46523+
46524+int
46525+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46526+{
46527+ if (addr)
46528+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46529+ else {
46530+ struct sockaddr_in sin;
46531+ const struct inet_sock *inet = inet_sk(sk);
46532+
46533+ sin.sin_addr.s_addr = inet->inet_daddr;
46534+ sin.sin_port = inet->inet_dport;
46535+
46536+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46537+ }
46538+}
46539+
46540+int
46541+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46542+{
46543+ struct sockaddr_in sin;
46544+
46545+ if (unlikely(skb->len < sizeof (struct udphdr)))
46546+ return 0; // skip this packet
46547+
46548+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46549+ sin.sin_port = udp_hdr(skb)->source;
46550+
46551+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46552+}
46553diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46554--- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46555+++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46556@@ -0,0 +1,207 @@
46557+#include <linux/kernel.h>
46558+#include <linux/mm.h>
46559+#include <linux/sched.h>
46560+#include <linux/poll.h>
46561+#include <linux/string.h>
46562+#include <linux/file.h>
46563+#include <linux/types.h>
46564+#include <linux/vmalloc.h>
46565+#include <linux/grinternal.h>
46566+
46567+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46568+ size_t count, loff_t *ppos);
46569+extern int gr_acl_is_enabled(void);
46570+
46571+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46572+static int gr_learn_attached;
46573+
46574+/* use a 512k buffer */
46575+#define LEARN_BUFFER_SIZE (512 * 1024)
46576+
46577+static DEFINE_SPINLOCK(gr_learn_lock);
46578+static DEFINE_MUTEX(gr_learn_user_mutex);
46579+
46580+/* we need to maintain two buffers, so that the kernel context of grlearn
46581+ uses a semaphore around the userspace copying, and the other kernel contexts
46582+ use a spinlock when copying into the buffer, since they cannot sleep
46583+*/
46584+static char *learn_buffer;
46585+static char *learn_buffer_user;
46586+static int learn_buffer_len;
46587+static int learn_buffer_user_len;
46588+
46589+static ssize_t
46590+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46591+{
46592+ DECLARE_WAITQUEUE(wait, current);
46593+ ssize_t retval = 0;
46594+
46595+ add_wait_queue(&learn_wait, &wait);
46596+ set_current_state(TASK_INTERRUPTIBLE);
46597+ do {
46598+ mutex_lock(&gr_learn_user_mutex);
46599+ spin_lock(&gr_learn_lock);
46600+ if (learn_buffer_len)
46601+ break;
46602+ spin_unlock(&gr_learn_lock);
46603+ mutex_unlock(&gr_learn_user_mutex);
46604+ if (file->f_flags & O_NONBLOCK) {
46605+ retval = -EAGAIN;
46606+ goto out;
46607+ }
46608+ if (signal_pending(current)) {
46609+ retval = -ERESTARTSYS;
46610+ goto out;
46611+ }
46612+
46613+ schedule();
46614+ } while (1);
46615+
46616+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46617+ learn_buffer_user_len = learn_buffer_len;
46618+ retval = learn_buffer_len;
46619+ learn_buffer_len = 0;
46620+
46621+ spin_unlock(&gr_learn_lock);
46622+
46623+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46624+ retval = -EFAULT;
46625+
46626+ mutex_unlock(&gr_learn_user_mutex);
46627+out:
46628+ set_current_state(TASK_RUNNING);
46629+ remove_wait_queue(&learn_wait, &wait);
46630+ return retval;
46631+}
46632+
46633+static unsigned int
46634+poll_learn(struct file * file, poll_table * wait)
46635+{
46636+ poll_wait(file, &learn_wait, wait);
46637+
46638+ if (learn_buffer_len)
46639+ return (POLLIN | POLLRDNORM);
46640+
46641+ return 0;
46642+}
46643+
46644+void
46645+gr_clear_learn_entries(void)
46646+{
46647+ char *tmp;
46648+
46649+ mutex_lock(&gr_learn_user_mutex);
46650+ spin_lock(&gr_learn_lock);
46651+ tmp = learn_buffer;
46652+ learn_buffer = NULL;
46653+ spin_unlock(&gr_learn_lock);
46654+ if (tmp)
46655+ vfree(tmp);
46656+ if (learn_buffer_user != NULL) {
46657+ vfree(learn_buffer_user);
46658+ learn_buffer_user = NULL;
46659+ }
46660+ learn_buffer_len = 0;
46661+ mutex_unlock(&gr_learn_user_mutex);
46662+
46663+ return;
46664+}
46665+
46666+void
46667+gr_add_learn_entry(const char *fmt, ...)
46668+{
46669+ va_list args;
46670+ unsigned int len;
46671+
46672+ if (!gr_learn_attached)
46673+ return;
46674+
46675+ spin_lock(&gr_learn_lock);
46676+
46677+ /* leave a gap at the end so we know when it's "full" but don't have to
46678+ compute the exact length of the string we're trying to append
46679+ */
46680+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46681+ spin_unlock(&gr_learn_lock);
46682+ wake_up_interruptible(&learn_wait);
46683+ return;
46684+ }
46685+ if (learn_buffer == NULL) {
46686+ spin_unlock(&gr_learn_lock);
46687+ return;
46688+ }
46689+
46690+ va_start(args, fmt);
46691+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46692+ va_end(args);
46693+
46694+ learn_buffer_len += len + 1;
46695+
46696+ spin_unlock(&gr_learn_lock);
46697+ wake_up_interruptible(&learn_wait);
46698+
46699+ return;
46700+}
46701+
46702+static int
46703+open_learn(struct inode *inode, struct file *file)
46704+{
46705+ if (file->f_mode & FMODE_READ && gr_learn_attached)
46706+ return -EBUSY;
46707+ if (file->f_mode & FMODE_READ) {
46708+ int retval = 0;
46709+ mutex_lock(&gr_learn_user_mutex);
46710+ if (learn_buffer == NULL)
46711+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46712+ if (learn_buffer_user == NULL)
46713+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46714+ if (learn_buffer == NULL) {
46715+ retval = -ENOMEM;
46716+ goto out_error;
46717+ }
46718+ if (learn_buffer_user == NULL) {
46719+ retval = -ENOMEM;
46720+ goto out_error;
46721+ }
46722+ learn_buffer_len = 0;
46723+ learn_buffer_user_len = 0;
46724+ gr_learn_attached = 1;
46725+out_error:
46726+ mutex_unlock(&gr_learn_user_mutex);
46727+ return retval;
46728+ }
46729+ return 0;
46730+}
46731+
46732+static int
46733+close_learn(struct inode *inode, struct file *file)
46734+{
46735+ if (file->f_mode & FMODE_READ) {
46736+ char *tmp = NULL;
46737+ mutex_lock(&gr_learn_user_mutex);
46738+ spin_lock(&gr_learn_lock);
46739+ tmp = learn_buffer;
46740+ learn_buffer = NULL;
46741+ spin_unlock(&gr_learn_lock);
46742+ if (tmp)
46743+ vfree(tmp);
46744+ if (learn_buffer_user != NULL) {
46745+ vfree(learn_buffer_user);
46746+ learn_buffer_user = NULL;
46747+ }
46748+ learn_buffer_len = 0;
46749+ learn_buffer_user_len = 0;
46750+ gr_learn_attached = 0;
46751+ mutex_unlock(&gr_learn_user_mutex);
46752+ }
46753+
46754+ return 0;
46755+}
46756+
46757+const struct file_operations grsec_fops = {
46758+ .read = read_learn,
46759+ .write = write_grsec_handler,
46760+ .open = open_learn,
46761+ .release = close_learn,
46762+ .poll = poll_learn,
46763+};
46764diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46765--- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46766+++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46767@@ -0,0 +1,68 @@
46768+#include <linux/kernel.h>
46769+#include <linux/sched.h>
46770+#include <linux/gracl.h>
46771+#include <linux/grinternal.h>
46772+
46773+static const char *restab_log[] = {
46774+ [RLIMIT_CPU] = "RLIMIT_CPU",
46775+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46776+ [RLIMIT_DATA] = "RLIMIT_DATA",
46777+ [RLIMIT_STACK] = "RLIMIT_STACK",
46778+ [RLIMIT_CORE] = "RLIMIT_CORE",
46779+ [RLIMIT_RSS] = "RLIMIT_RSS",
46780+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
46781+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46782+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46783+ [RLIMIT_AS] = "RLIMIT_AS",
46784+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46785+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46786+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46787+ [RLIMIT_NICE] = "RLIMIT_NICE",
46788+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46789+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46790+ [GR_CRASH_RES] = "RLIMIT_CRASH"
46791+};
46792+
46793+void
46794+gr_log_resource(const struct task_struct *task,
46795+ const int res, const unsigned long wanted, const int gt)
46796+{
46797+ const struct cred *cred;
46798+ unsigned long rlim;
46799+
46800+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
46801+ return;
46802+
46803+ // not yet supported resource
46804+ if (unlikely(!restab_log[res]))
46805+ return;
46806+
46807+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46808+ rlim = task_rlimit_max(task, res);
46809+ else
46810+ rlim = task_rlimit(task, res);
46811+
46812+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46813+ return;
46814+
46815+ rcu_read_lock();
46816+ cred = __task_cred(task);
46817+
46818+ if (res == RLIMIT_NPROC &&
46819+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46820+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46821+ goto out_rcu_unlock;
46822+ else if (res == RLIMIT_MEMLOCK &&
46823+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46824+ goto out_rcu_unlock;
46825+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46826+ goto out_rcu_unlock;
46827+ rcu_read_unlock();
46828+
46829+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46830+
46831+ return;
46832+out_rcu_unlock:
46833+ rcu_read_unlock();
46834+ return;
46835+}
46836diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46837--- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46838+++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46839@@ -0,0 +1,299 @@
46840+#include <linux/kernel.h>
46841+#include <linux/mm.h>
46842+#include <asm/uaccess.h>
46843+#include <asm/errno.h>
46844+#include <asm/mman.h>
46845+#include <net/sock.h>
46846+#include <linux/file.h>
46847+#include <linux/fs.h>
46848+#include <linux/net.h>
46849+#include <linux/in.h>
46850+#include <linux/slab.h>
46851+#include <linux/types.h>
46852+#include <linux/sched.h>
46853+#include <linux/timer.h>
46854+#include <linux/gracl.h>
46855+#include <linux/grsecurity.h>
46856+#include <linux/grinternal.h>
46857+
46858+static struct crash_uid *uid_set;
46859+static unsigned short uid_used;
46860+static DEFINE_SPINLOCK(gr_uid_lock);
46861+extern rwlock_t gr_inode_lock;
46862+extern struct acl_subject_label *
46863+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46864+ struct acl_role_label *role);
46865+
46866+#ifdef CONFIG_BTRFS_FS
46867+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46868+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46869+#endif
46870+
46871+static inline dev_t __get_dev(const struct dentry *dentry)
46872+{
46873+#ifdef CONFIG_BTRFS_FS
46874+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46875+ return get_btrfs_dev_from_inode(dentry->d_inode);
46876+ else
46877+#endif
46878+ return dentry->d_inode->i_sb->s_dev;
46879+}
46880+
46881+int
46882+gr_init_uidset(void)
46883+{
46884+ uid_set =
46885+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46886+ uid_used = 0;
46887+
46888+ return uid_set ? 1 : 0;
46889+}
46890+
46891+void
46892+gr_free_uidset(void)
46893+{
46894+ if (uid_set)
46895+ kfree(uid_set);
46896+
46897+ return;
46898+}
46899+
46900+int
46901+gr_find_uid(const uid_t uid)
46902+{
46903+ struct crash_uid *tmp = uid_set;
46904+ uid_t buid;
46905+ int low = 0, high = uid_used - 1, mid;
46906+
46907+ while (high >= low) {
46908+ mid = (low + high) >> 1;
46909+ buid = tmp[mid].uid;
46910+ if (buid == uid)
46911+ return mid;
46912+ if (buid > uid)
46913+ high = mid - 1;
46914+ if (buid < uid)
46915+ low = mid + 1;
46916+ }
46917+
46918+ return -1;
46919+}
46920+
46921+static __inline__ void
46922+gr_insertsort(void)
46923+{
46924+ unsigned short i, j;
46925+ struct crash_uid index;
46926+
46927+ for (i = 1; i < uid_used; i++) {
46928+ index = uid_set[i];
46929+ j = i;
46930+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46931+ uid_set[j] = uid_set[j - 1];
46932+ j--;
46933+ }
46934+ uid_set[j] = index;
46935+ }
46936+
46937+ return;
46938+}
46939+
46940+static __inline__ void
46941+gr_insert_uid(const uid_t uid, const unsigned long expires)
46942+{
46943+ int loc;
46944+
46945+ if (uid_used == GR_UIDTABLE_MAX)
46946+ return;
46947+
46948+ loc = gr_find_uid(uid);
46949+
46950+ if (loc >= 0) {
46951+ uid_set[loc].expires = expires;
46952+ return;
46953+ }
46954+
46955+ uid_set[uid_used].uid = uid;
46956+ uid_set[uid_used].expires = expires;
46957+ uid_used++;
46958+
46959+ gr_insertsort();
46960+
46961+ return;
46962+}
46963+
46964+void
46965+gr_remove_uid(const unsigned short loc)
46966+{
46967+ unsigned short i;
46968+
46969+ for (i = loc + 1; i < uid_used; i++)
46970+ uid_set[i - 1] = uid_set[i];
46971+
46972+ uid_used--;
46973+
46974+ return;
46975+}
46976+
46977+int
46978+gr_check_crash_uid(const uid_t uid)
46979+{
46980+ int loc;
46981+ int ret = 0;
46982+
46983+ if (unlikely(!gr_acl_is_enabled()))
46984+ return 0;
46985+
46986+ spin_lock(&gr_uid_lock);
46987+ loc = gr_find_uid(uid);
46988+
46989+ if (loc < 0)
46990+ goto out_unlock;
46991+
46992+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
46993+ gr_remove_uid(loc);
46994+ else
46995+ ret = 1;
46996+
46997+out_unlock:
46998+ spin_unlock(&gr_uid_lock);
46999+ return ret;
47000+}
47001+
47002+static __inline__ int
47003+proc_is_setxid(const struct cred *cred)
47004+{
47005+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
47006+ cred->uid != cred->fsuid)
47007+ return 1;
47008+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
47009+ cred->gid != cred->fsgid)
47010+ return 1;
47011+
47012+ return 0;
47013+}
47014+
47015+extern int gr_fake_force_sig(int sig, struct task_struct *t);
47016+
47017+void
47018+gr_handle_crash(struct task_struct *task, const int sig)
47019+{
47020+ struct acl_subject_label *curr;
47021+ struct acl_subject_label *curr2;
47022+ struct task_struct *tsk, *tsk2;
47023+ const struct cred *cred;
47024+ const struct cred *cred2;
47025+
47026+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
47027+ return;
47028+
47029+ if (unlikely(!gr_acl_is_enabled()))
47030+ return;
47031+
47032+ curr = task->acl;
47033+
47034+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
47035+ return;
47036+
47037+ if (time_before_eq(curr->expires, get_seconds())) {
47038+ curr->expires = 0;
47039+ curr->crashes = 0;
47040+ }
47041+
47042+ curr->crashes++;
47043+
47044+ if (!curr->expires)
47045+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
47046+
47047+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47048+ time_after(curr->expires, get_seconds())) {
47049+ rcu_read_lock();
47050+ cred = __task_cred(task);
47051+ if (cred->uid && proc_is_setxid(cred)) {
47052+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47053+ spin_lock(&gr_uid_lock);
47054+ gr_insert_uid(cred->uid, curr->expires);
47055+ spin_unlock(&gr_uid_lock);
47056+ curr->expires = 0;
47057+ curr->crashes = 0;
47058+ read_lock(&tasklist_lock);
47059+ do_each_thread(tsk2, tsk) {
47060+ cred2 = __task_cred(tsk);
47061+ if (tsk != task && cred2->uid == cred->uid)
47062+ gr_fake_force_sig(SIGKILL, tsk);
47063+ } while_each_thread(tsk2, tsk);
47064+ read_unlock(&tasklist_lock);
47065+ } else {
47066+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47067+ read_lock(&tasklist_lock);
47068+ do_each_thread(tsk2, tsk) {
47069+ if (likely(tsk != task)) {
47070+ curr2 = tsk->acl;
47071+
47072+ if (curr2->device == curr->device &&
47073+ curr2->inode == curr->inode)
47074+ gr_fake_force_sig(SIGKILL, tsk);
47075+ }
47076+ } while_each_thread(tsk2, tsk);
47077+ read_unlock(&tasklist_lock);
47078+ }
47079+ rcu_read_unlock();
47080+ }
47081+
47082+ return;
47083+}
47084+
47085+int
47086+gr_check_crash_exec(const struct file *filp)
47087+{
47088+ struct acl_subject_label *curr;
47089+
47090+ if (unlikely(!gr_acl_is_enabled()))
47091+ return 0;
47092+
47093+ read_lock(&gr_inode_lock);
47094+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47095+ __get_dev(filp->f_path.dentry),
47096+ current->role);
47097+ read_unlock(&gr_inode_lock);
47098+
47099+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47100+ (!curr->crashes && !curr->expires))
47101+ return 0;
47102+
47103+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47104+ time_after(curr->expires, get_seconds()))
47105+ return 1;
47106+ else if (time_before_eq(curr->expires, get_seconds())) {
47107+ curr->crashes = 0;
47108+ curr->expires = 0;
47109+ }
47110+
47111+ return 0;
47112+}
47113+
47114+void
47115+gr_handle_alertkill(struct task_struct *task)
47116+{
47117+ struct acl_subject_label *curracl;
47118+ __u32 curr_ip;
47119+ struct task_struct *p, *p2;
47120+
47121+ if (unlikely(!gr_acl_is_enabled()))
47122+ return;
47123+
47124+ curracl = task->acl;
47125+ curr_ip = task->signal->curr_ip;
47126+
47127+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47128+ read_lock(&tasklist_lock);
47129+ do_each_thread(p2, p) {
47130+ if (p->signal->curr_ip == curr_ip)
47131+ gr_fake_force_sig(SIGKILL, p);
47132+ } while_each_thread(p2, p);
47133+ read_unlock(&tasklist_lock);
47134+ } else if (curracl->mode & GR_KILLPROC)
47135+ gr_fake_force_sig(SIGKILL, task);
47136+
47137+ return;
47138+}
47139diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
47140--- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47141+++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
47142@@ -0,0 +1,40 @@
47143+#include <linux/kernel.h>
47144+#include <linux/mm.h>
47145+#include <linux/sched.h>
47146+#include <linux/file.h>
47147+#include <linux/ipc.h>
47148+#include <linux/gracl.h>
47149+#include <linux/grsecurity.h>
47150+#include <linux/grinternal.h>
47151+
47152+int
47153+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47154+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47155+{
47156+ struct task_struct *task;
47157+
47158+ if (!gr_acl_is_enabled())
47159+ return 1;
47160+
47161+ rcu_read_lock();
47162+ read_lock(&tasklist_lock);
47163+
47164+ task = find_task_by_vpid(shm_cprid);
47165+
47166+ if (unlikely(!task))
47167+ task = find_task_by_vpid(shm_lapid);
47168+
47169+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47170+ (task->pid == shm_lapid)) &&
47171+ (task->acl->mode & GR_PROTSHM) &&
47172+ (task->acl != current->acl))) {
47173+ read_unlock(&tasklist_lock);
47174+ rcu_read_unlock();
47175+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47176+ return 0;
47177+ }
47178+ read_unlock(&tasklist_lock);
47179+ rcu_read_unlock();
47180+
47181+ return 1;
47182+}
47183diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47184--- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47185+++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47186@@ -0,0 +1,19 @@
47187+#include <linux/kernel.h>
47188+#include <linux/sched.h>
47189+#include <linux/fs.h>
47190+#include <linux/file.h>
47191+#include <linux/grsecurity.h>
47192+#include <linux/grinternal.h>
47193+
47194+void
47195+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47196+{
47197+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47198+ if ((grsec_enable_chdir && grsec_enable_group &&
47199+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47200+ !grsec_enable_group)) {
47201+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47202+ }
47203+#endif
47204+ return;
47205+}
47206diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47207--- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47208+++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47209@@ -0,0 +1,349 @@
47210+#include <linux/kernel.h>
47211+#include <linux/module.h>
47212+#include <linux/sched.h>
47213+#include <linux/file.h>
47214+#include <linux/fs.h>
47215+#include <linux/mount.h>
47216+#include <linux/types.h>
47217+#include <linux/pid_namespace.h>
47218+#include <linux/grsecurity.h>
47219+#include <linux/grinternal.h>
47220+
47221+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47222+{
47223+#ifdef CONFIG_GRKERNSEC
47224+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47225+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47226+ task->gr_is_chrooted = 1;
47227+ else
47228+ task->gr_is_chrooted = 0;
47229+
47230+ task->gr_chroot_dentry = path->dentry;
47231+#endif
47232+ return;
47233+}
47234+
47235+void gr_clear_chroot_entries(struct task_struct *task)
47236+{
47237+#ifdef CONFIG_GRKERNSEC
47238+ task->gr_is_chrooted = 0;
47239+ task->gr_chroot_dentry = NULL;
47240+#endif
47241+ return;
47242+}
47243+
47244+int
47245+gr_handle_chroot_unix(const pid_t pid)
47246+{
47247+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47248+ struct task_struct *p;
47249+
47250+ if (unlikely(!grsec_enable_chroot_unix))
47251+ return 1;
47252+
47253+ if (likely(!proc_is_chrooted(current)))
47254+ return 1;
47255+
47256+ rcu_read_lock();
47257+ read_lock(&tasklist_lock);
47258+ p = find_task_by_vpid_unrestricted(pid);
47259+ if (unlikely(p && !have_same_root(current, p))) {
47260+ read_unlock(&tasklist_lock);
47261+ rcu_read_unlock();
47262+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47263+ return 0;
47264+ }
47265+ read_unlock(&tasklist_lock);
47266+ rcu_read_unlock();
47267+#endif
47268+ return 1;
47269+}
47270+
47271+int
47272+gr_handle_chroot_nice(void)
47273+{
47274+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47275+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47276+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47277+ return -EPERM;
47278+ }
47279+#endif
47280+ return 0;
47281+}
47282+
47283+int
47284+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47285+{
47286+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47287+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47288+ && proc_is_chrooted(current)) {
47289+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47290+ return -EACCES;
47291+ }
47292+#endif
47293+ return 0;
47294+}
47295+
47296+int
47297+gr_handle_chroot_rawio(const struct inode *inode)
47298+{
47299+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47300+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47301+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47302+ return 1;
47303+#endif
47304+ return 0;
47305+}
47306+
47307+int
47308+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47309+{
47310+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47311+ struct task_struct *p;
47312+ int ret = 0;
47313+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47314+ return ret;
47315+
47316+ read_lock(&tasklist_lock);
47317+ do_each_pid_task(pid, type, p) {
47318+ if (!have_same_root(current, p)) {
47319+ ret = 1;
47320+ goto out;
47321+ }
47322+ } while_each_pid_task(pid, type, p);
47323+out:
47324+ read_unlock(&tasklist_lock);
47325+ return ret;
47326+#endif
47327+ return 0;
47328+}
47329+
47330+int
47331+gr_pid_is_chrooted(struct task_struct *p)
47332+{
47333+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47334+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47335+ return 0;
47336+
47337+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47338+ !have_same_root(current, p)) {
47339+ return 1;
47340+ }
47341+#endif
47342+ return 0;
47343+}
47344+
47345+EXPORT_SYMBOL(gr_pid_is_chrooted);
47346+
47347+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47348+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47349+{
47350+ struct path path, currentroot;
47351+ int ret = 0;
47352+
47353+ path.dentry = (struct dentry *)u_dentry;
47354+ path.mnt = (struct vfsmount *)u_mnt;
47355+ get_fs_root(current->fs, &currentroot);
47356+ if (path_is_under(&path, &currentroot))
47357+ ret = 1;
47358+ path_put(&currentroot);
47359+
47360+ return ret;
47361+}
47362+#endif
47363+
47364+int
47365+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47366+{
47367+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47368+ if (!grsec_enable_chroot_fchdir)
47369+ return 1;
47370+
47371+ if (!proc_is_chrooted(current))
47372+ return 1;
47373+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47374+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47375+ return 0;
47376+ }
47377+#endif
47378+ return 1;
47379+}
47380+
47381+int
47382+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47383+ const time_t shm_createtime)
47384+{
47385+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47386+ struct task_struct *p;
47387+ time_t starttime;
47388+
47389+ if (unlikely(!grsec_enable_chroot_shmat))
47390+ return 1;
47391+
47392+ if (likely(!proc_is_chrooted(current)))
47393+ return 1;
47394+
47395+ rcu_read_lock();
47396+ read_lock(&tasklist_lock);
47397+
47398+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47399+ starttime = p->start_time.tv_sec;
47400+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47401+ if (have_same_root(current, p)) {
47402+ goto allow;
47403+ } else {
47404+ read_unlock(&tasklist_lock);
47405+ rcu_read_unlock();
47406+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47407+ return 0;
47408+ }
47409+ }
47410+ /* creator exited, pid reuse, fall through to next check */
47411+ }
47412+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47413+ if (unlikely(!have_same_root(current, p))) {
47414+ read_unlock(&tasklist_lock);
47415+ rcu_read_unlock();
47416+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47417+ return 0;
47418+ }
47419+ }
47420+
47421+allow:
47422+ read_unlock(&tasklist_lock);
47423+ rcu_read_unlock();
47424+#endif
47425+ return 1;
47426+}
47427+
47428+void
47429+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47430+{
47431+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47432+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47433+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47434+#endif
47435+ return;
47436+}
47437+
47438+int
47439+gr_handle_chroot_mknod(const struct dentry *dentry,
47440+ const struct vfsmount *mnt, const int mode)
47441+{
47442+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47443+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47444+ proc_is_chrooted(current)) {
47445+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47446+ return -EPERM;
47447+ }
47448+#endif
47449+ return 0;
47450+}
47451+
47452+int
47453+gr_handle_chroot_mount(const struct dentry *dentry,
47454+ const struct vfsmount *mnt, const char *dev_name)
47455+{
47456+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47457+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47458+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47459+ return -EPERM;
47460+ }
47461+#endif
47462+ return 0;
47463+}
47464+
47465+int
47466+gr_handle_chroot_pivot(void)
47467+{
47468+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47469+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47470+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47471+ return -EPERM;
47472+ }
47473+#endif
47474+ return 0;
47475+}
47476+
47477+int
47478+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47479+{
47480+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47481+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47482+ !gr_is_outside_chroot(dentry, mnt)) {
47483+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47484+ return -EPERM;
47485+ }
47486+#endif
47487+ return 0;
47488+}
47489+
47490+int
47491+gr_handle_chroot_caps(struct path *path)
47492+{
47493+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47494+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47495+ (init_task.fs->root.dentry != path->dentry) &&
47496+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47497+
47498+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47499+ const struct cred *old = current_cred();
47500+ struct cred *new = prepare_creds();
47501+ if (new == NULL)
47502+ return 1;
47503+
47504+ new->cap_permitted = cap_drop(old->cap_permitted,
47505+ chroot_caps);
47506+ new->cap_inheritable = cap_drop(old->cap_inheritable,
47507+ chroot_caps);
47508+ new->cap_effective = cap_drop(old->cap_effective,
47509+ chroot_caps);
47510+
47511+ commit_creds(new);
47512+
47513+ return 0;
47514+ }
47515+#endif
47516+ return 0;
47517+}
47518+
47519+int
47520+gr_handle_chroot_sysctl(const int op)
47521+{
47522+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47523+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47524+ proc_is_chrooted(current))
47525+ return -EACCES;
47526+#endif
47527+ return 0;
47528+}
47529+
47530+void
47531+gr_handle_chroot_chdir(struct path *path)
47532+{
47533+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47534+ if (grsec_enable_chroot_chdir)
47535+ set_fs_pwd(current->fs, path);
47536+#endif
47537+ return;
47538+}
47539+
47540+int
47541+gr_handle_chroot_chmod(const struct dentry *dentry,
47542+ const struct vfsmount *mnt, const int mode)
47543+{
47544+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47545+ /* allow chmod +s on directories, but not files */
47546+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47547+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47548+ proc_is_chrooted(current)) {
47549+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47550+ return -EPERM;
47551+ }
47552+#endif
47553+ return 0;
47554+}
47555+
47556+#ifdef CONFIG_SECURITY
47557+EXPORT_SYMBOL(gr_handle_chroot_caps);
47558+#endif
47559diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47560--- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47561+++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47562@@ -0,0 +1,447 @@
47563+#include <linux/kernel.h>
47564+#include <linux/module.h>
47565+#include <linux/sched.h>
47566+#include <linux/file.h>
47567+#include <linux/fs.h>
47568+#include <linux/kdev_t.h>
47569+#include <linux/net.h>
47570+#include <linux/in.h>
47571+#include <linux/ip.h>
47572+#include <linux/skbuff.h>
47573+#include <linux/sysctl.h>
47574+
47575+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47576+void
47577+pax_set_initial_flags(struct linux_binprm *bprm)
47578+{
47579+ return;
47580+}
47581+#endif
47582+
47583+#ifdef CONFIG_SYSCTL
47584+__u32
47585+gr_handle_sysctl(const struct ctl_table * table, const int op)
47586+{
47587+ return 0;
47588+}
47589+#endif
47590+
47591+#ifdef CONFIG_TASKSTATS
47592+int gr_is_taskstats_denied(int pid)
47593+{
47594+ return 0;
47595+}
47596+#endif
47597+
47598+int
47599+gr_acl_is_enabled(void)
47600+{
47601+ return 0;
47602+}
47603+
47604+int
47605+gr_handle_rawio(const struct inode *inode)
47606+{
47607+ return 0;
47608+}
47609+
47610+void
47611+gr_acl_handle_psacct(struct task_struct *task, const long code)
47612+{
47613+ return;
47614+}
47615+
47616+int
47617+gr_handle_ptrace(struct task_struct *task, const long request)
47618+{
47619+ return 0;
47620+}
47621+
47622+int
47623+gr_handle_proc_ptrace(struct task_struct *task)
47624+{
47625+ return 0;
47626+}
47627+
47628+void
47629+gr_learn_resource(const struct task_struct *task,
47630+ const int res, const unsigned long wanted, const int gt)
47631+{
47632+ return;
47633+}
47634+
47635+int
47636+gr_set_acls(const int type)
47637+{
47638+ return 0;
47639+}
47640+
47641+int
47642+gr_check_hidden_task(const struct task_struct *tsk)
47643+{
47644+ return 0;
47645+}
47646+
47647+int
47648+gr_check_protected_task(const struct task_struct *task)
47649+{
47650+ return 0;
47651+}
47652+
47653+int
47654+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47655+{
47656+ return 0;
47657+}
47658+
47659+void
47660+gr_copy_label(struct task_struct *tsk)
47661+{
47662+ return;
47663+}
47664+
47665+void
47666+gr_set_pax_flags(struct task_struct *task)
47667+{
47668+ return;
47669+}
47670+
47671+int
47672+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47673+ const int unsafe_share)
47674+{
47675+ return 0;
47676+}
47677+
47678+void
47679+gr_handle_delete(const ino_t ino, const dev_t dev)
47680+{
47681+ return;
47682+}
47683+
47684+void
47685+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47686+{
47687+ return;
47688+}
47689+
47690+void
47691+gr_handle_crash(struct task_struct *task, const int sig)
47692+{
47693+ return;
47694+}
47695+
47696+int
47697+gr_check_crash_exec(const struct file *filp)
47698+{
47699+ return 0;
47700+}
47701+
47702+int
47703+gr_check_crash_uid(const uid_t uid)
47704+{
47705+ return 0;
47706+}
47707+
47708+void
47709+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47710+ struct dentry *old_dentry,
47711+ struct dentry *new_dentry,
47712+ struct vfsmount *mnt, const __u8 replace)
47713+{
47714+ return;
47715+}
47716+
47717+int
47718+gr_search_socket(const int family, const int type, const int protocol)
47719+{
47720+ return 1;
47721+}
47722+
47723+int
47724+gr_search_connectbind(const int mode, const struct socket *sock,
47725+ const struct sockaddr_in *addr)
47726+{
47727+ return 0;
47728+}
47729+
47730+int
47731+gr_is_capable(const int cap)
47732+{
47733+ return 1;
47734+}
47735+
47736+int
47737+gr_is_capable_nolog(const int cap)
47738+{
47739+ return 1;
47740+}
47741+
47742+void
47743+gr_handle_alertkill(struct task_struct *task)
47744+{
47745+ return;
47746+}
47747+
47748+__u32
47749+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47750+{
47751+ return 1;
47752+}
47753+
47754+__u32
47755+gr_acl_handle_hidden_file(const struct dentry * dentry,
47756+ const struct vfsmount * mnt)
47757+{
47758+ return 1;
47759+}
47760+
47761+__u32
47762+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47763+ const int fmode)
47764+{
47765+ return 1;
47766+}
47767+
47768+__u32
47769+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47770+{
47771+ return 1;
47772+}
47773+
47774+__u32
47775+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47776+{
47777+ return 1;
47778+}
47779+
47780+int
47781+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47782+ unsigned int *vm_flags)
47783+{
47784+ return 1;
47785+}
47786+
47787+__u32
47788+gr_acl_handle_truncate(const struct dentry * dentry,
47789+ const struct vfsmount * mnt)
47790+{
47791+ return 1;
47792+}
47793+
47794+__u32
47795+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47796+{
47797+ return 1;
47798+}
47799+
47800+__u32
47801+gr_acl_handle_access(const struct dentry * dentry,
47802+ const struct vfsmount * mnt, const int fmode)
47803+{
47804+ return 1;
47805+}
47806+
47807+__u32
47808+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47809+ mode_t mode)
47810+{
47811+ return 1;
47812+}
47813+
47814+__u32
47815+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47816+ mode_t mode)
47817+{
47818+ return 1;
47819+}
47820+
47821+__u32
47822+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47823+{
47824+ return 1;
47825+}
47826+
47827+__u32
47828+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47829+{
47830+ return 1;
47831+}
47832+
47833+void
47834+grsecurity_init(void)
47835+{
47836+ return;
47837+}
47838+
47839+__u32
47840+gr_acl_handle_mknod(const struct dentry * new_dentry,
47841+ const struct dentry * parent_dentry,
47842+ const struct vfsmount * parent_mnt,
47843+ const int mode)
47844+{
47845+ return 1;
47846+}
47847+
47848+__u32
47849+gr_acl_handle_mkdir(const struct dentry * new_dentry,
47850+ const struct dentry * parent_dentry,
47851+ const struct vfsmount * parent_mnt)
47852+{
47853+ return 1;
47854+}
47855+
47856+__u32
47857+gr_acl_handle_symlink(const struct dentry * new_dentry,
47858+ const struct dentry * parent_dentry,
47859+ const struct vfsmount * parent_mnt, const char *from)
47860+{
47861+ return 1;
47862+}
47863+
47864+__u32
47865+gr_acl_handle_link(const struct dentry * new_dentry,
47866+ const struct dentry * parent_dentry,
47867+ const struct vfsmount * parent_mnt,
47868+ const struct dentry * old_dentry,
47869+ const struct vfsmount * old_mnt, const char *to)
47870+{
47871+ return 1;
47872+}
47873+
47874+int
47875+gr_acl_handle_rename(const struct dentry *new_dentry,
47876+ const struct dentry *parent_dentry,
47877+ const struct vfsmount *parent_mnt,
47878+ const struct dentry *old_dentry,
47879+ const struct inode *old_parent_inode,
47880+ const struct vfsmount *old_mnt, const char *newname)
47881+{
47882+ return 0;
47883+}
47884+
47885+int
47886+gr_acl_handle_filldir(const struct file *file, const char *name,
47887+ const int namelen, const ino_t ino)
47888+{
47889+ return 1;
47890+}
47891+
47892+int
47893+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47894+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47895+{
47896+ return 1;
47897+}
47898+
47899+int
47900+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47901+{
47902+ return 0;
47903+}
47904+
47905+int
47906+gr_search_accept(const struct socket *sock)
47907+{
47908+ return 0;
47909+}
47910+
47911+int
47912+gr_search_listen(const struct socket *sock)
47913+{
47914+ return 0;
47915+}
47916+
47917+int
47918+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47919+{
47920+ return 0;
47921+}
47922+
47923+__u32
47924+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47925+{
47926+ return 1;
47927+}
47928+
47929+__u32
47930+gr_acl_handle_creat(const struct dentry * dentry,
47931+ const struct dentry * p_dentry,
47932+ const struct vfsmount * p_mnt, const int fmode,
47933+ const int imode)
47934+{
47935+ return 1;
47936+}
47937+
47938+void
47939+gr_acl_handle_exit(void)
47940+{
47941+ return;
47942+}
47943+
47944+int
47945+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47946+{
47947+ return 1;
47948+}
47949+
47950+void
47951+gr_set_role_label(const uid_t uid, const gid_t gid)
47952+{
47953+ return;
47954+}
47955+
47956+int
47957+gr_acl_handle_procpidmem(const struct task_struct *task)
47958+{
47959+ return 0;
47960+}
47961+
47962+int
47963+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47964+{
47965+ return 0;
47966+}
47967+
47968+int
47969+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47970+{
47971+ return 0;
47972+}
47973+
47974+void
47975+gr_set_kernel_label(struct task_struct *task)
47976+{
47977+ return;
47978+}
47979+
47980+int
47981+gr_check_user_change(int real, int effective, int fs)
47982+{
47983+ return 0;
47984+}
47985+
47986+int
47987+gr_check_group_change(int real, int effective, int fs)
47988+{
47989+ return 0;
47990+}
47991+
47992+int gr_acl_enable_at_secure(void)
47993+{
47994+ return 0;
47995+}
47996+
47997+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47998+{
47999+ return dentry->d_inode->i_sb->s_dev;
48000+}
48001+
48002+EXPORT_SYMBOL(gr_is_capable);
48003+EXPORT_SYMBOL(gr_is_capable_nolog);
48004+EXPORT_SYMBOL(gr_learn_resource);
48005+EXPORT_SYMBOL(gr_set_kernel_label);
48006+#ifdef CONFIG_SECURITY
48007+EXPORT_SYMBOL(gr_check_user_change);
48008+EXPORT_SYMBOL(gr_check_group_change);
48009+#endif
48010diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
48011--- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
48012+++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
48013@@ -0,0 +1,146 @@
48014+#include <linux/kernel.h>
48015+#include <linux/sched.h>
48016+#include <linux/file.h>
48017+#include <linux/binfmts.h>
48018+#include <linux/fs.h>
48019+#include <linux/types.h>
48020+#include <linux/grdefs.h>
48021+#include <linux/grinternal.h>
48022+#include <linux/capability.h>
48023+#include <linux/compat.h>
48024+
48025+#include <asm/uaccess.h>
48026+
48027+#ifdef CONFIG_GRKERNSEC_EXECLOG
48028+static char gr_exec_arg_buf[132];
48029+static DEFINE_MUTEX(gr_exec_arg_mutex);
48030+#endif
48031+
48032+int
48033+gr_handle_nproc(void)
48034+{
48035+#ifdef CONFIG_GRKERNSEC_EXECVE
48036+ const struct cred *cred = current_cred();
48037+ if (grsec_enable_execve && cred->user &&
48038+ (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
48039+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
48040+ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
48041+ return -EAGAIN;
48042+ }
48043+#endif
48044+ return 0;
48045+}
48046+
48047+void
48048+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
48049+{
48050+#ifdef CONFIG_GRKERNSEC_EXECLOG
48051+ char *grarg = gr_exec_arg_buf;
48052+ unsigned int i, x, execlen = 0;
48053+ char c;
48054+
48055+ if (!((grsec_enable_execlog && grsec_enable_group &&
48056+ in_group_p(grsec_audit_gid))
48057+ || (grsec_enable_execlog && !grsec_enable_group)))
48058+ return;
48059+
48060+ mutex_lock(&gr_exec_arg_mutex);
48061+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
48062+
48063+ if (unlikely(argv == NULL))
48064+ goto log;
48065+
48066+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
48067+ const char __user *p;
48068+ unsigned int len;
48069+
48070+ if (copy_from_user(&p, argv + i, sizeof(p)))
48071+ goto log;
48072+ if (!p)
48073+ goto log;
48074+ len = strnlen_user(p, 128 - execlen);
48075+ if (len > 128 - execlen)
48076+ len = 128 - execlen;
48077+ else if (len > 0)
48078+ len--;
48079+ if (copy_from_user(grarg + execlen, p, len))
48080+ goto log;
48081+
48082+ /* rewrite unprintable characters */
48083+ for (x = 0; x < len; x++) {
48084+ c = *(grarg + execlen + x);
48085+ if (c < 32 || c > 126)
48086+ *(grarg + execlen + x) = ' ';
48087+ }
48088+
48089+ execlen += len;
48090+ *(grarg + execlen) = ' ';
48091+ *(grarg + execlen + 1) = '\0';
48092+ execlen++;
48093+ }
48094+
48095+ log:
48096+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48097+ bprm->file->f_path.mnt, grarg);
48098+ mutex_unlock(&gr_exec_arg_mutex);
48099+#endif
48100+ return;
48101+}
48102+
48103+#ifdef CONFIG_COMPAT
48104+void
48105+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
48106+{
48107+#ifdef CONFIG_GRKERNSEC_EXECLOG
48108+ char *grarg = gr_exec_arg_buf;
48109+ unsigned int i, x, execlen = 0;
48110+ char c;
48111+
48112+ if (!((grsec_enable_execlog && grsec_enable_group &&
48113+ in_group_p(grsec_audit_gid))
48114+ || (grsec_enable_execlog && !grsec_enable_group)))
48115+ return;
48116+
48117+ mutex_lock(&gr_exec_arg_mutex);
48118+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
48119+
48120+ if (unlikely(argv == NULL))
48121+ goto log;
48122+
48123+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
48124+ compat_uptr_t p;
48125+ unsigned int len;
48126+
48127+ if (get_user(p, argv + i))
48128+ goto log;
48129+ len = strnlen_user(compat_ptr(p), 128 - execlen);
48130+ if (len > 128 - execlen)
48131+ len = 128 - execlen;
48132+ else if (len > 0)
48133+ len--;
48134+ else
48135+ goto log;
48136+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
48137+ goto log;
48138+
48139+ /* rewrite unprintable characters */
48140+ for (x = 0; x < len; x++) {
48141+ c = *(grarg + execlen + x);
48142+ if (c < 32 || c > 126)
48143+ *(grarg + execlen + x) = ' ';
48144+ }
48145+
48146+ execlen += len;
48147+ *(grarg + execlen) = ' ';
48148+ *(grarg + execlen + 1) = '\0';
48149+ execlen++;
48150+ }
48151+
48152+ log:
48153+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48154+ bprm->file->f_path.mnt, grarg);
48155+ mutex_unlock(&gr_exec_arg_mutex);
48156+#endif
48157+ return;
48158+}
48159+#endif
48160diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48161--- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48162+++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48163@@ -0,0 +1,24 @@
48164+#include <linux/kernel.h>
48165+#include <linux/sched.h>
48166+#include <linux/fs.h>
48167+#include <linux/file.h>
48168+#include <linux/grinternal.h>
48169+
48170+int
48171+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48172+ const struct dentry *dir, const int flag, const int acc_mode)
48173+{
48174+#ifdef CONFIG_GRKERNSEC_FIFO
48175+ const struct cred *cred = current_cred();
48176+
48177+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48178+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48179+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48180+ (cred->fsuid != dentry->d_inode->i_uid)) {
48181+ if (!inode_permission(dentry->d_inode, acc_mode))
48182+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48183+ return -EACCES;
48184+ }
48185+#endif
48186+ return 0;
48187+}
48188diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48189--- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48190+++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48191@@ -0,0 +1,23 @@
48192+#include <linux/kernel.h>
48193+#include <linux/sched.h>
48194+#include <linux/grsecurity.h>
48195+#include <linux/grinternal.h>
48196+#include <linux/errno.h>
48197+
48198+void
48199+gr_log_forkfail(const int retval)
48200+{
48201+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48202+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48203+ switch (retval) {
48204+ case -EAGAIN:
48205+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48206+ break;
48207+ case -ENOMEM:
48208+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48209+ break;
48210+ }
48211+ }
48212+#endif
48213+ return;
48214+}
48215diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48216--- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48217+++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48218@@ -0,0 +1,273 @@
48219+#include <linux/kernel.h>
48220+#include <linux/sched.h>
48221+#include <linux/mm.h>
48222+#include <linux/gracl.h>
48223+#include <linux/slab.h>
48224+#include <linux/vmalloc.h>
48225+#include <linux/percpu.h>
48226+#include <linux/module.h>
48227+
48228+int grsec_enable_brute;
48229+int grsec_enable_link;
48230+int grsec_enable_dmesg;
48231+int grsec_enable_harden_ptrace;
48232+int grsec_enable_fifo;
48233+int grsec_enable_execve;
48234+int grsec_enable_execlog;
48235+int grsec_enable_signal;
48236+int grsec_enable_forkfail;
48237+int grsec_enable_audit_ptrace;
48238+int grsec_enable_time;
48239+int grsec_enable_audit_textrel;
48240+int grsec_enable_group;
48241+int grsec_audit_gid;
48242+int grsec_enable_chdir;
48243+int grsec_enable_mount;
48244+int grsec_enable_rofs;
48245+int grsec_enable_chroot_findtask;
48246+int grsec_enable_chroot_mount;
48247+int grsec_enable_chroot_shmat;
48248+int grsec_enable_chroot_fchdir;
48249+int grsec_enable_chroot_double;
48250+int grsec_enable_chroot_pivot;
48251+int grsec_enable_chroot_chdir;
48252+int grsec_enable_chroot_chmod;
48253+int grsec_enable_chroot_mknod;
48254+int grsec_enable_chroot_nice;
48255+int grsec_enable_chroot_execlog;
48256+int grsec_enable_chroot_caps;
48257+int grsec_enable_chroot_sysctl;
48258+int grsec_enable_chroot_unix;
48259+int grsec_enable_tpe;
48260+int grsec_tpe_gid;
48261+int grsec_enable_blackhole;
48262+#ifdef CONFIG_IPV6_MODULE
48263+EXPORT_SYMBOL(grsec_enable_blackhole);
48264+#endif
48265+int grsec_lastack_retries;
48266+int grsec_enable_tpe_all;
48267+int grsec_enable_tpe_invert;
48268+int grsec_enable_socket_all;
48269+int grsec_socket_all_gid;
48270+int grsec_enable_socket_client;
48271+int grsec_socket_client_gid;
48272+int grsec_enable_socket_server;
48273+int grsec_socket_server_gid;
48274+int grsec_resource_logging;
48275+int grsec_disable_privio;
48276+int grsec_enable_log_rwxmaps;
48277+int grsec_lock;
48278+
48279+DEFINE_SPINLOCK(grsec_alert_lock);
48280+unsigned long grsec_alert_wtime = 0;
48281+unsigned long grsec_alert_fyet = 0;
48282+
48283+DEFINE_SPINLOCK(grsec_audit_lock);
48284+
48285+DEFINE_RWLOCK(grsec_exec_file_lock);
48286+
48287+char *gr_shared_page[4];
48288+
48289+char *gr_alert_log_fmt;
48290+char *gr_audit_log_fmt;
48291+char *gr_alert_log_buf;
48292+char *gr_audit_log_buf;
48293+
48294+extern struct gr_arg *gr_usermode;
48295+extern unsigned char *gr_system_salt;
48296+extern unsigned char *gr_system_sum;
48297+
48298+void __init
48299+grsecurity_init(void)
48300+{
48301+ int j;
48302+ /* create the per-cpu shared pages */
48303+
48304+#ifdef CONFIG_X86
48305+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48306+#endif
48307+
48308+ for (j = 0; j < 4; j++) {
48309+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48310+ if (gr_shared_page[j] == NULL) {
48311+ panic("Unable to allocate grsecurity shared page");
48312+ return;
48313+ }
48314+ }
48315+
48316+ /* allocate log buffers */
48317+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48318+ if (!gr_alert_log_fmt) {
48319+ panic("Unable to allocate grsecurity alert log format buffer");
48320+ return;
48321+ }
48322+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48323+ if (!gr_audit_log_fmt) {
48324+ panic("Unable to allocate grsecurity audit log format buffer");
48325+ return;
48326+ }
48327+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48328+ if (!gr_alert_log_buf) {
48329+ panic("Unable to allocate grsecurity alert log buffer");
48330+ return;
48331+ }
48332+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48333+ if (!gr_audit_log_buf) {
48334+ panic("Unable to allocate grsecurity audit log buffer");
48335+ return;
48336+ }
48337+
48338+ /* allocate memory for authentication structure */
48339+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48340+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48341+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48342+
48343+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48344+ panic("Unable to allocate grsecurity authentication structure");
48345+ return;
48346+ }
48347+
48348+
48349+#ifdef CONFIG_GRKERNSEC_IO
48350+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48351+ grsec_disable_privio = 1;
48352+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48353+ grsec_disable_privio = 1;
48354+#else
48355+ grsec_disable_privio = 0;
48356+#endif
48357+#endif
48358+
48359+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48360+ /* for backward compatibility, tpe_invert always defaults to on if
48361+ enabled in the kernel
48362+ */
48363+ grsec_enable_tpe_invert = 1;
48364+#endif
48365+
48366+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48367+#ifndef CONFIG_GRKERNSEC_SYSCTL
48368+ grsec_lock = 1;
48369+#endif
48370+
48371+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48372+ grsec_enable_audit_textrel = 1;
48373+#endif
48374+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48375+ grsec_enable_log_rwxmaps = 1;
48376+#endif
48377+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48378+ grsec_enable_group = 1;
48379+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48380+#endif
48381+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48382+ grsec_enable_chdir = 1;
48383+#endif
48384+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48385+ grsec_enable_harden_ptrace = 1;
48386+#endif
48387+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48388+ grsec_enable_mount = 1;
48389+#endif
48390+#ifdef CONFIG_GRKERNSEC_LINK
48391+ grsec_enable_link = 1;
48392+#endif
48393+#ifdef CONFIG_GRKERNSEC_BRUTE
48394+ grsec_enable_brute = 1;
48395+#endif
48396+#ifdef CONFIG_GRKERNSEC_DMESG
48397+ grsec_enable_dmesg = 1;
48398+#endif
48399+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48400+ grsec_enable_blackhole = 1;
48401+ grsec_lastack_retries = 4;
48402+#endif
48403+#ifdef CONFIG_GRKERNSEC_FIFO
48404+ grsec_enable_fifo = 1;
48405+#endif
48406+#ifdef CONFIG_GRKERNSEC_EXECVE
48407+ grsec_enable_execve = 1;
48408+#endif
48409+#ifdef CONFIG_GRKERNSEC_EXECLOG
48410+ grsec_enable_execlog = 1;
48411+#endif
48412+#ifdef CONFIG_GRKERNSEC_SIGNAL
48413+ grsec_enable_signal = 1;
48414+#endif
48415+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48416+ grsec_enable_forkfail = 1;
48417+#endif
48418+#ifdef CONFIG_GRKERNSEC_TIME
48419+ grsec_enable_time = 1;
48420+#endif
48421+#ifdef CONFIG_GRKERNSEC_RESLOG
48422+ grsec_resource_logging = 1;
48423+#endif
48424+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48425+ grsec_enable_chroot_findtask = 1;
48426+#endif
48427+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48428+ grsec_enable_chroot_unix = 1;
48429+#endif
48430+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48431+ grsec_enable_chroot_mount = 1;
48432+#endif
48433+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48434+ grsec_enable_chroot_fchdir = 1;
48435+#endif
48436+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48437+ grsec_enable_chroot_shmat = 1;
48438+#endif
48439+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48440+ grsec_enable_audit_ptrace = 1;
48441+#endif
48442+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48443+ grsec_enable_chroot_double = 1;
48444+#endif
48445+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48446+ grsec_enable_chroot_pivot = 1;
48447+#endif
48448+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48449+ grsec_enable_chroot_chdir = 1;
48450+#endif
48451+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48452+ grsec_enable_chroot_chmod = 1;
48453+#endif
48454+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48455+ grsec_enable_chroot_mknod = 1;
48456+#endif
48457+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48458+ grsec_enable_chroot_nice = 1;
48459+#endif
48460+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48461+ grsec_enable_chroot_execlog = 1;
48462+#endif
48463+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48464+ grsec_enable_chroot_caps = 1;
48465+#endif
48466+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48467+ grsec_enable_chroot_sysctl = 1;
48468+#endif
48469+#ifdef CONFIG_GRKERNSEC_TPE
48470+ grsec_enable_tpe = 1;
48471+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48472+#ifdef CONFIG_GRKERNSEC_TPE_ALL
48473+ grsec_enable_tpe_all = 1;
48474+#endif
48475+#endif
48476+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48477+ grsec_enable_socket_all = 1;
48478+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48479+#endif
48480+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48481+ grsec_enable_socket_client = 1;
48482+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48483+#endif
48484+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48485+ grsec_enable_socket_server = 1;
48486+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48487+#endif
48488+#endif
48489+
48490+ return;
48491+}
48492diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48493--- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48494+++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48495@@ -0,0 +1,43 @@
48496+#include <linux/kernel.h>
48497+#include <linux/sched.h>
48498+#include <linux/fs.h>
48499+#include <linux/file.h>
48500+#include <linux/grinternal.h>
48501+
48502+int
48503+gr_handle_follow_link(const struct inode *parent,
48504+ const struct inode *inode,
48505+ const struct dentry *dentry, const struct vfsmount *mnt)
48506+{
48507+#ifdef CONFIG_GRKERNSEC_LINK
48508+ const struct cred *cred = current_cred();
48509+
48510+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48511+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48512+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48513+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48514+ return -EACCES;
48515+ }
48516+#endif
48517+ return 0;
48518+}
48519+
48520+int
48521+gr_handle_hardlink(const struct dentry *dentry,
48522+ const struct vfsmount *mnt,
48523+ struct inode *inode, const int mode, const char *to)
48524+{
48525+#ifdef CONFIG_GRKERNSEC_LINK
48526+ const struct cred *cred = current_cred();
48527+
48528+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48529+ (!S_ISREG(mode) || (mode & S_ISUID) ||
48530+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48531+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48532+ !capable(CAP_FOWNER) && cred->uid) {
48533+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48534+ return -EPERM;
48535+ }
48536+#endif
48537+ return 0;
48538+}
48539diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48540--- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48541+++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48542@@ -0,0 +1,310 @@
48543+#include <linux/kernel.h>
48544+#include <linux/sched.h>
48545+#include <linux/file.h>
48546+#include <linux/tty.h>
48547+#include <linux/fs.h>
48548+#include <linux/grinternal.h>
48549+
48550+#ifdef CONFIG_TREE_PREEMPT_RCU
48551+#define DISABLE_PREEMPT() preempt_disable()
48552+#define ENABLE_PREEMPT() preempt_enable()
48553+#else
48554+#define DISABLE_PREEMPT()
48555+#define ENABLE_PREEMPT()
48556+#endif
48557+
48558+#define BEGIN_LOCKS(x) \
48559+ DISABLE_PREEMPT(); \
48560+ rcu_read_lock(); \
48561+ read_lock(&tasklist_lock); \
48562+ read_lock(&grsec_exec_file_lock); \
48563+ if (x != GR_DO_AUDIT) \
48564+ spin_lock(&grsec_alert_lock); \
48565+ else \
48566+ spin_lock(&grsec_audit_lock)
48567+
48568+#define END_LOCKS(x) \
48569+ if (x != GR_DO_AUDIT) \
48570+ spin_unlock(&grsec_alert_lock); \
48571+ else \
48572+ spin_unlock(&grsec_audit_lock); \
48573+ read_unlock(&grsec_exec_file_lock); \
48574+ read_unlock(&tasklist_lock); \
48575+ rcu_read_unlock(); \
48576+ ENABLE_PREEMPT(); \
48577+ if (x == GR_DONT_AUDIT) \
48578+ gr_handle_alertkill(current)
48579+
48580+enum {
48581+ FLOODING,
48582+ NO_FLOODING
48583+};
48584+
48585+extern char *gr_alert_log_fmt;
48586+extern char *gr_audit_log_fmt;
48587+extern char *gr_alert_log_buf;
48588+extern char *gr_audit_log_buf;
48589+
48590+static int gr_log_start(int audit)
48591+{
48592+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48593+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48594+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48595+
48596+ if (audit == GR_DO_AUDIT)
48597+ goto set_fmt;
48598+
48599+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48600+ grsec_alert_wtime = jiffies;
48601+ grsec_alert_fyet = 0;
48602+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48603+ grsec_alert_fyet++;
48604+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48605+ grsec_alert_wtime = jiffies;
48606+ grsec_alert_fyet++;
48607+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48608+ return FLOODING;
48609+ } else return FLOODING;
48610+
48611+set_fmt:
48612+ memset(buf, 0, PAGE_SIZE);
48613+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
48614+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48615+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48616+ } else if (current->signal->curr_ip) {
48617+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48618+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48619+ } else if (gr_acl_is_enabled()) {
48620+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48621+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48622+ } else {
48623+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
48624+ strcpy(buf, fmt);
48625+ }
48626+
48627+ return NO_FLOODING;
48628+}
48629+
48630+static void gr_log_middle(int audit, const char *msg, va_list ap)
48631+ __attribute__ ((format (printf, 2, 0)));
48632+
48633+static void gr_log_middle(int audit, const char *msg, va_list ap)
48634+{
48635+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48636+ unsigned int len = strlen(buf);
48637+
48638+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48639+
48640+ return;
48641+}
48642+
48643+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48644+ __attribute__ ((format (printf, 2, 3)));
48645+
48646+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48647+{
48648+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48649+ unsigned int len = strlen(buf);
48650+ va_list ap;
48651+
48652+ va_start(ap, msg);
48653+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48654+ va_end(ap);
48655+
48656+ return;
48657+}
48658+
48659+static void gr_log_end(int audit)
48660+{
48661+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48662+ unsigned int len = strlen(buf);
48663+
48664+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48665+ printk("%s\n", buf);
48666+
48667+ return;
48668+}
48669+
48670+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48671+{
48672+ int logtype;
48673+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48674+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48675+ void *voidptr = NULL;
48676+ int num1 = 0, num2 = 0;
48677+ unsigned long ulong1 = 0, ulong2 = 0;
48678+ struct dentry *dentry = NULL;
48679+ struct vfsmount *mnt = NULL;
48680+ struct file *file = NULL;
48681+ struct task_struct *task = NULL;
48682+ const struct cred *cred, *pcred;
48683+ va_list ap;
48684+
48685+ BEGIN_LOCKS(audit);
48686+ logtype = gr_log_start(audit);
48687+ if (logtype == FLOODING) {
48688+ END_LOCKS(audit);
48689+ return;
48690+ }
48691+ va_start(ap, argtypes);
48692+ switch (argtypes) {
48693+ case GR_TTYSNIFF:
48694+ task = va_arg(ap, struct task_struct *);
48695+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48696+ break;
48697+ case GR_SYSCTL_HIDDEN:
48698+ str1 = va_arg(ap, char *);
48699+ gr_log_middle_varargs(audit, msg, result, str1);
48700+ break;
48701+ case GR_RBAC:
48702+ dentry = va_arg(ap, struct dentry *);
48703+ mnt = va_arg(ap, struct vfsmount *);
48704+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48705+ break;
48706+ case GR_RBAC_STR:
48707+ dentry = va_arg(ap, struct dentry *);
48708+ mnt = va_arg(ap, struct vfsmount *);
48709+ str1 = va_arg(ap, char *);
48710+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48711+ break;
48712+ case GR_STR_RBAC:
48713+ str1 = va_arg(ap, char *);
48714+ dentry = va_arg(ap, struct dentry *);
48715+ mnt = va_arg(ap, struct vfsmount *);
48716+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48717+ break;
48718+ case GR_RBAC_MODE2:
48719+ dentry = va_arg(ap, struct dentry *);
48720+ mnt = va_arg(ap, struct vfsmount *);
48721+ str1 = va_arg(ap, char *);
48722+ str2 = va_arg(ap, char *);
48723+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48724+ break;
48725+ case GR_RBAC_MODE3:
48726+ dentry = va_arg(ap, struct dentry *);
48727+ mnt = va_arg(ap, struct vfsmount *);
48728+ str1 = va_arg(ap, char *);
48729+ str2 = va_arg(ap, char *);
48730+ str3 = va_arg(ap, char *);
48731+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48732+ break;
48733+ case GR_FILENAME:
48734+ dentry = va_arg(ap, struct dentry *);
48735+ mnt = va_arg(ap, struct vfsmount *);
48736+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48737+ break;
48738+ case GR_STR_FILENAME:
48739+ str1 = va_arg(ap, char *);
48740+ dentry = va_arg(ap, struct dentry *);
48741+ mnt = va_arg(ap, struct vfsmount *);
48742+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48743+ break;
48744+ case GR_FILENAME_STR:
48745+ dentry = va_arg(ap, struct dentry *);
48746+ mnt = va_arg(ap, struct vfsmount *);
48747+ str1 = va_arg(ap, char *);
48748+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48749+ break;
48750+ case GR_FILENAME_TWO_INT:
48751+ dentry = va_arg(ap, struct dentry *);
48752+ mnt = va_arg(ap, struct vfsmount *);
48753+ num1 = va_arg(ap, int);
48754+ num2 = va_arg(ap, int);
48755+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48756+ break;
48757+ case GR_FILENAME_TWO_INT_STR:
48758+ dentry = va_arg(ap, struct dentry *);
48759+ mnt = va_arg(ap, struct vfsmount *);
48760+ num1 = va_arg(ap, int);
48761+ num2 = va_arg(ap, int);
48762+ str1 = va_arg(ap, char *);
48763+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48764+ break;
48765+ case GR_TEXTREL:
48766+ file = va_arg(ap, struct file *);
48767+ ulong1 = va_arg(ap, unsigned long);
48768+ ulong2 = va_arg(ap, unsigned long);
48769+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48770+ break;
48771+ case GR_PTRACE:
48772+ task = va_arg(ap, struct task_struct *);
48773+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48774+ break;
48775+ case GR_RESOURCE:
48776+ task = va_arg(ap, struct task_struct *);
48777+ cred = __task_cred(task);
48778+ pcred = __task_cred(task->real_parent);
48779+ ulong1 = va_arg(ap, unsigned long);
48780+ str1 = va_arg(ap, char *);
48781+ ulong2 = va_arg(ap, unsigned long);
48782+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48783+ break;
48784+ case GR_CAP:
48785+ task = va_arg(ap, struct task_struct *);
48786+ cred = __task_cred(task);
48787+ pcred = __task_cred(task->real_parent);
48788+ str1 = va_arg(ap, char *);
48789+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48790+ break;
48791+ case GR_SIG:
48792+ str1 = va_arg(ap, char *);
48793+ voidptr = va_arg(ap, void *);
48794+ gr_log_middle_varargs(audit, msg, str1, voidptr);
48795+ break;
48796+ case GR_SIG2:
48797+ task = va_arg(ap, struct task_struct *);
48798+ cred = __task_cred(task);
48799+ pcred = __task_cred(task->real_parent);
48800+ num1 = va_arg(ap, int);
48801+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48802+ break;
48803+ case GR_CRASH1:
48804+ task = va_arg(ap, struct task_struct *);
48805+ cred = __task_cred(task);
48806+ pcred = __task_cred(task->real_parent);
48807+ ulong1 = va_arg(ap, unsigned long);
48808+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48809+ break;
48810+ case GR_CRASH2:
48811+ task = va_arg(ap, struct task_struct *);
48812+ cred = __task_cred(task);
48813+ pcred = __task_cred(task->real_parent);
48814+ ulong1 = va_arg(ap, unsigned long);
48815+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48816+ break;
48817+ case GR_RWXMAP:
48818+ file = va_arg(ap, struct file *);
48819+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48820+ break;
48821+ case GR_PSACCT:
48822+ {
48823+ unsigned int wday, cday;
48824+ __u8 whr, chr;
48825+ __u8 wmin, cmin;
48826+ __u8 wsec, csec;
48827+ char cur_tty[64] = { 0 };
48828+ char parent_tty[64] = { 0 };
48829+
48830+ task = va_arg(ap, struct task_struct *);
48831+ wday = va_arg(ap, unsigned int);
48832+ cday = va_arg(ap, unsigned int);
48833+ whr = va_arg(ap, int);
48834+ chr = va_arg(ap, int);
48835+ wmin = va_arg(ap, int);
48836+ cmin = va_arg(ap, int);
48837+ wsec = va_arg(ap, int);
48838+ csec = va_arg(ap, int);
48839+ ulong1 = va_arg(ap, unsigned long);
48840+ cred = __task_cred(task);
48841+ pcred = __task_cred(task->real_parent);
48842+
48843+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48844+ }
48845+ break;
48846+ default:
48847+ gr_log_middle(audit, msg, ap);
48848+ }
48849+ va_end(ap);
48850+ gr_log_end(audit);
48851+ END_LOCKS(audit);
48852+}
48853diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48854--- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48855+++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48856@@ -0,0 +1,33 @@
48857+#include <linux/kernel.h>
48858+#include <linux/sched.h>
48859+#include <linux/mm.h>
48860+#include <linux/mman.h>
48861+#include <linux/grinternal.h>
48862+
48863+void
48864+gr_handle_ioperm(void)
48865+{
48866+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48867+ return;
48868+}
48869+
48870+void
48871+gr_handle_iopl(void)
48872+{
48873+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48874+ return;
48875+}
48876+
48877+void
48878+gr_handle_mem_readwrite(u64 from, u64 to)
48879+{
48880+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48881+ return;
48882+}
48883+
48884+void
48885+gr_handle_vm86(void)
48886+{
48887+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48888+ return;
48889+}
48890diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48891--- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48892+++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48893@@ -0,0 +1,62 @@
48894+#include <linux/kernel.h>
48895+#include <linux/sched.h>
48896+#include <linux/mount.h>
48897+#include <linux/grsecurity.h>
48898+#include <linux/grinternal.h>
48899+
48900+void
48901+gr_log_remount(const char *devname, const int retval)
48902+{
48903+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48904+ if (grsec_enable_mount && (retval >= 0))
48905+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48906+#endif
48907+ return;
48908+}
48909+
48910+void
48911+gr_log_unmount(const char *devname, const int retval)
48912+{
48913+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48914+ if (grsec_enable_mount && (retval >= 0))
48915+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48916+#endif
48917+ return;
48918+}
48919+
48920+void
48921+gr_log_mount(const char *from, const char *to, const int retval)
48922+{
48923+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48924+ if (grsec_enable_mount && (retval >= 0))
48925+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48926+#endif
48927+ return;
48928+}
48929+
48930+int
48931+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48932+{
48933+#ifdef CONFIG_GRKERNSEC_ROFS
48934+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48935+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48936+ return -EPERM;
48937+ } else
48938+ return 0;
48939+#endif
48940+ return 0;
48941+}
48942+
48943+int
48944+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48945+{
48946+#ifdef CONFIG_GRKERNSEC_ROFS
48947+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48948+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48949+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48950+ return -EPERM;
48951+ } else
48952+ return 0;
48953+#endif
48954+ return 0;
48955+}
48956diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48957--- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48958+++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48959@@ -0,0 +1,36 @@
48960+#include <linux/kernel.h>
48961+#include <linux/sched.h>
48962+#include <linux/mm.h>
48963+#include <linux/file.h>
48964+#include <linux/grinternal.h>
48965+#include <linux/grsecurity.h>
48966+
48967+void
48968+gr_log_textrel(struct vm_area_struct * vma)
48969+{
48970+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48971+ if (grsec_enable_audit_textrel)
48972+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48973+#endif
48974+ return;
48975+}
48976+
48977+void
48978+gr_log_rwxmmap(struct file *file)
48979+{
48980+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48981+ if (grsec_enable_log_rwxmaps)
48982+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48983+#endif
48984+ return;
48985+}
48986+
48987+void
48988+gr_log_rwxmprotect(struct file *file)
48989+{
48990+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48991+ if (grsec_enable_log_rwxmaps)
48992+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48993+#endif
48994+ return;
48995+}
48996diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
48997--- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48998+++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
48999@@ -0,0 +1,14 @@
49000+#include <linux/kernel.h>
49001+#include <linux/sched.h>
49002+#include <linux/grinternal.h>
49003+#include <linux/grsecurity.h>
49004+
49005+void
49006+gr_audit_ptrace(struct task_struct *task)
49007+{
49008+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49009+ if (grsec_enable_audit_ptrace)
49010+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
49011+#endif
49012+ return;
49013+}
49014diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
49015--- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
49016+++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
49017@@ -0,0 +1,206 @@
49018+#include <linux/kernel.h>
49019+#include <linux/sched.h>
49020+#include <linux/delay.h>
49021+#include <linux/grsecurity.h>
49022+#include <linux/grinternal.h>
49023+#include <linux/hardirq.h>
49024+
49025+char *signames[] = {
49026+ [SIGSEGV] = "Segmentation fault",
49027+ [SIGILL] = "Illegal instruction",
49028+ [SIGABRT] = "Abort",
49029+ [SIGBUS] = "Invalid alignment/Bus error"
49030+};
49031+
49032+void
49033+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
49034+{
49035+#ifdef CONFIG_GRKERNSEC_SIGNAL
49036+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
49037+ (sig == SIGABRT) || (sig == SIGBUS))) {
49038+ if (t->pid == current->pid) {
49039+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
49040+ } else {
49041+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
49042+ }
49043+ }
49044+#endif
49045+ return;
49046+}
49047+
49048+int
49049+gr_handle_signal(const struct task_struct *p, const int sig)
49050+{
49051+#ifdef CONFIG_GRKERNSEC
49052+ if (current->pid > 1 && gr_check_protected_task(p)) {
49053+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
49054+ return -EPERM;
49055+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
49056+ return -EPERM;
49057+ }
49058+#endif
49059+ return 0;
49060+}
49061+
49062+#ifdef CONFIG_GRKERNSEC
49063+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
49064+
49065+int gr_fake_force_sig(int sig, struct task_struct *t)
49066+{
49067+ unsigned long int flags;
49068+ int ret, blocked, ignored;
49069+ struct k_sigaction *action;
49070+
49071+ spin_lock_irqsave(&t->sighand->siglock, flags);
49072+ action = &t->sighand->action[sig-1];
49073+ ignored = action->sa.sa_handler == SIG_IGN;
49074+ blocked = sigismember(&t->blocked, sig);
49075+ if (blocked || ignored) {
49076+ action->sa.sa_handler = SIG_DFL;
49077+ if (blocked) {
49078+ sigdelset(&t->blocked, sig);
49079+ recalc_sigpending_and_wake(t);
49080+ }
49081+ }
49082+ if (action->sa.sa_handler == SIG_DFL)
49083+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
49084+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
49085+
49086+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
49087+
49088+ return ret;
49089+}
49090+#endif
49091+
49092+#ifdef CONFIG_GRKERNSEC_BRUTE
49093+#define GR_USER_BAN_TIME (15 * 60)
49094+
49095+static int __get_dumpable(unsigned long mm_flags)
49096+{
49097+ int ret;
49098+
49099+ ret = mm_flags & MMF_DUMPABLE_MASK;
49100+ return (ret >= 2) ? 2 : ret;
49101+}
49102+#endif
49103+
49104+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
49105+{
49106+#ifdef CONFIG_GRKERNSEC_BRUTE
49107+ uid_t uid = 0;
49108+
49109+ if (!grsec_enable_brute)
49110+ return;
49111+
49112+ rcu_read_lock();
49113+ read_lock(&tasklist_lock);
49114+ read_lock(&grsec_exec_file_lock);
49115+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
49116+ p->real_parent->brute = 1;
49117+ else {
49118+ const struct cred *cred = __task_cred(p), *cred2;
49119+ struct task_struct *tsk, *tsk2;
49120+
49121+ if (!__get_dumpable(mm_flags) && cred->uid) {
49122+ struct user_struct *user;
49123+
49124+ uid = cred->uid;
49125+
49126+ /* this is put upon execution past expiration */
49127+ user = find_user(uid);
49128+ if (user == NULL)
49129+ goto unlock;
49130+ user->banned = 1;
49131+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
49132+ if (user->ban_expires == ~0UL)
49133+ user->ban_expires--;
49134+
49135+ do_each_thread(tsk2, tsk) {
49136+ cred2 = __task_cred(tsk);
49137+ if (tsk != p && cred2->uid == uid)
49138+ gr_fake_force_sig(SIGKILL, tsk);
49139+ } while_each_thread(tsk2, tsk);
49140+ }
49141+ }
49142+unlock:
49143+ read_unlock(&grsec_exec_file_lock);
49144+ read_unlock(&tasklist_lock);
49145+ rcu_read_unlock();
49146+
49147+ if (uid)
49148+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49149+
49150+#endif
49151+ return;
49152+}
49153+
49154+void gr_handle_brute_check(void)
49155+{
49156+#ifdef CONFIG_GRKERNSEC_BRUTE
49157+ if (current->brute)
49158+ msleep(30 * 1000);
49159+#endif
49160+ return;
49161+}
49162+
49163+void gr_handle_kernel_exploit(void)
49164+{
49165+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49166+ const struct cred *cred;
49167+ struct task_struct *tsk, *tsk2;
49168+ struct user_struct *user;
49169+ uid_t uid;
49170+
49171+ if (in_irq() || in_serving_softirq() || in_nmi())
49172+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49173+
49174+ uid = current_uid();
49175+
49176+ if (uid == 0)
49177+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
49178+ else {
49179+ /* kill all the processes of this user, hold a reference
49180+ to their creds struct, and prevent them from creating
49181+ another process until system reset
49182+ */
49183+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49184+ /* we intentionally leak this ref */
49185+ user = get_uid(current->cred->user);
49186+ if (user) {
49187+ user->banned = 1;
49188+ user->ban_expires = ~0UL;
49189+ }
49190+
49191+ read_lock(&tasklist_lock);
49192+ do_each_thread(tsk2, tsk) {
49193+ cred = __task_cred(tsk);
49194+ if (cred->uid == uid)
49195+ gr_fake_force_sig(SIGKILL, tsk);
49196+ } while_each_thread(tsk2, tsk);
49197+ read_unlock(&tasklist_lock);
49198+ }
49199+#endif
49200+}
49201+
49202+int __gr_process_user_ban(struct user_struct *user)
49203+{
49204+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49205+ if (unlikely(user->banned)) {
49206+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49207+ user->banned = 0;
49208+ user->ban_expires = 0;
49209+ free_uid(user);
49210+ } else
49211+ return -EPERM;
49212+ }
49213+#endif
49214+ return 0;
49215+}
49216+
49217+int gr_process_user_ban(void)
49218+{
49219+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49220+ return __gr_process_user_ban(current->cred->user);
49221+#endif
49222+ return 0;
49223+}
49224diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49225--- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49226+++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49227@@ -0,0 +1,244 @@
49228+#include <linux/kernel.h>
49229+#include <linux/module.h>
49230+#include <linux/sched.h>
49231+#include <linux/file.h>
49232+#include <linux/net.h>
49233+#include <linux/in.h>
49234+#include <linux/ip.h>
49235+#include <net/sock.h>
49236+#include <net/inet_sock.h>
49237+#include <linux/grsecurity.h>
49238+#include <linux/grinternal.h>
49239+#include <linux/gracl.h>
49240+
49241+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49242+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49243+
49244+EXPORT_SYMBOL(gr_search_udp_recvmsg);
49245+EXPORT_SYMBOL(gr_search_udp_sendmsg);
49246+
49247+#ifdef CONFIG_UNIX_MODULE
49248+EXPORT_SYMBOL(gr_acl_handle_unix);
49249+EXPORT_SYMBOL(gr_acl_handle_mknod);
49250+EXPORT_SYMBOL(gr_handle_chroot_unix);
49251+EXPORT_SYMBOL(gr_handle_create);
49252+#endif
49253+
49254+#ifdef CONFIG_GRKERNSEC
49255+#define gr_conn_table_size 32749
49256+struct conn_table_entry {
49257+ struct conn_table_entry *next;
49258+ struct signal_struct *sig;
49259+};
49260+
49261+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49262+DEFINE_SPINLOCK(gr_conn_table_lock);
49263+
49264+extern const char * gr_socktype_to_name(unsigned char type);
49265+extern const char * gr_proto_to_name(unsigned char proto);
49266+extern const char * gr_sockfamily_to_name(unsigned char family);
49267+
49268+static __inline__ int
49269+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49270+{
49271+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49272+}
49273+
49274+static __inline__ int
49275+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49276+ __u16 sport, __u16 dport)
49277+{
49278+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49279+ sig->gr_sport == sport && sig->gr_dport == dport))
49280+ return 1;
49281+ else
49282+ return 0;
49283+}
49284+
49285+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49286+{
49287+ struct conn_table_entry **match;
49288+ unsigned int index;
49289+
49290+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49291+ sig->gr_sport, sig->gr_dport,
49292+ gr_conn_table_size);
49293+
49294+ newent->sig = sig;
49295+
49296+ match = &gr_conn_table[index];
49297+ newent->next = *match;
49298+ *match = newent;
49299+
49300+ return;
49301+}
49302+
49303+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49304+{
49305+ struct conn_table_entry *match, *last = NULL;
49306+ unsigned int index;
49307+
49308+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49309+ sig->gr_sport, sig->gr_dport,
49310+ gr_conn_table_size);
49311+
49312+ match = gr_conn_table[index];
49313+ while (match && !conn_match(match->sig,
49314+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49315+ sig->gr_dport)) {
49316+ last = match;
49317+ match = match->next;
49318+ }
49319+
49320+ if (match) {
49321+ if (last)
49322+ last->next = match->next;
49323+ else
49324+ gr_conn_table[index] = NULL;
49325+ kfree(match);
49326+ }
49327+
49328+ return;
49329+}
49330+
49331+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49332+ __u16 sport, __u16 dport)
49333+{
49334+ struct conn_table_entry *match;
49335+ unsigned int index;
49336+
49337+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49338+
49339+ match = gr_conn_table[index];
49340+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49341+ match = match->next;
49342+
49343+ if (match)
49344+ return match->sig;
49345+ else
49346+ return NULL;
49347+}
49348+
49349+#endif
49350+
49351+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49352+{
49353+#ifdef CONFIG_GRKERNSEC
49354+ struct signal_struct *sig = task->signal;
49355+ struct conn_table_entry *newent;
49356+
49357+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49358+ if (newent == NULL)
49359+ return;
49360+ /* no bh lock needed since we are called with bh disabled */
49361+ spin_lock(&gr_conn_table_lock);
49362+ gr_del_task_from_ip_table_nolock(sig);
49363+ sig->gr_saddr = inet->inet_rcv_saddr;
49364+ sig->gr_daddr = inet->inet_daddr;
49365+ sig->gr_sport = inet->inet_sport;
49366+ sig->gr_dport = inet->inet_dport;
49367+ gr_add_to_task_ip_table_nolock(sig, newent);
49368+ spin_unlock(&gr_conn_table_lock);
49369+#endif
49370+ return;
49371+}
49372+
49373+void gr_del_task_from_ip_table(struct task_struct *task)
49374+{
49375+#ifdef CONFIG_GRKERNSEC
49376+ spin_lock_bh(&gr_conn_table_lock);
49377+ gr_del_task_from_ip_table_nolock(task->signal);
49378+ spin_unlock_bh(&gr_conn_table_lock);
49379+#endif
49380+ return;
49381+}
49382+
49383+void
49384+gr_attach_curr_ip(const struct sock *sk)
49385+{
49386+#ifdef CONFIG_GRKERNSEC
49387+ struct signal_struct *p, *set;
49388+ const struct inet_sock *inet = inet_sk(sk);
49389+
49390+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49391+ return;
49392+
49393+ set = current->signal;
49394+
49395+ spin_lock_bh(&gr_conn_table_lock);
49396+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49397+ inet->inet_dport, inet->inet_sport);
49398+ if (unlikely(p != NULL)) {
49399+ set->curr_ip = p->curr_ip;
49400+ set->used_accept = 1;
49401+ gr_del_task_from_ip_table_nolock(p);
49402+ spin_unlock_bh(&gr_conn_table_lock);
49403+ return;
49404+ }
49405+ spin_unlock_bh(&gr_conn_table_lock);
49406+
49407+ set->curr_ip = inet->inet_daddr;
49408+ set->used_accept = 1;
49409+#endif
49410+ return;
49411+}
49412+
49413+int
49414+gr_handle_sock_all(const int family, const int type, const int protocol)
49415+{
49416+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49417+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49418+ (family != AF_UNIX)) {
49419+ if (family == AF_INET)
49420+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49421+ else
49422+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49423+ return -EACCES;
49424+ }
49425+#endif
49426+ return 0;
49427+}
49428+
49429+int
49430+gr_handle_sock_server(const struct sockaddr *sck)
49431+{
49432+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49433+ if (grsec_enable_socket_server &&
49434+ in_group_p(grsec_socket_server_gid) &&
49435+ sck && (sck->sa_family != AF_UNIX) &&
49436+ (sck->sa_family != AF_LOCAL)) {
49437+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49438+ return -EACCES;
49439+ }
49440+#endif
49441+ return 0;
49442+}
49443+
49444+int
49445+gr_handle_sock_server_other(const struct sock *sck)
49446+{
49447+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49448+ if (grsec_enable_socket_server &&
49449+ in_group_p(grsec_socket_server_gid) &&
49450+ sck && (sck->sk_family != AF_UNIX) &&
49451+ (sck->sk_family != AF_LOCAL)) {
49452+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49453+ return -EACCES;
49454+ }
49455+#endif
49456+ return 0;
49457+}
49458+
49459+int
49460+gr_handle_sock_client(const struct sockaddr *sck)
49461+{
49462+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49463+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49464+ sck && (sck->sa_family != AF_UNIX) &&
49465+ (sck->sa_family != AF_LOCAL)) {
49466+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49467+ return -EACCES;
49468+ }
49469+#endif
49470+ return 0;
49471+}
49472diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49473--- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49474+++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49475@@ -0,0 +1,442 @@
49476+#include <linux/kernel.h>
49477+#include <linux/sched.h>
49478+#include <linux/sysctl.h>
49479+#include <linux/grsecurity.h>
49480+#include <linux/grinternal.h>
49481+
49482+int
49483+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49484+{
49485+#ifdef CONFIG_GRKERNSEC_SYSCTL
49486+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49487+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49488+ return -EACCES;
49489+ }
49490+#endif
49491+ return 0;
49492+}
49493+
49494+#ifdef CONFIG_GRKERNSEC_ROFS
49495+static int __maybe_unused one = 1;
49496+#endif
49497+
49498+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49499+struct ctl_table grsecurity_table[] = {
49500+#ifdef CONFIG_GRKERNSEC_SYSCTL
49501+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49502+#ifdef CONFIG_GRKERNSEC_IO
49503+ {
49504+ .procname = "disable_priv_io",
49505+ .data = &grsec_disable_privio,
49506+ .maxlen = sizeof(int),
49507+ .mode = 0600,
49508+ .proc_handler = &proc_dointvec,
49509+ },
49510+#endif
49511+#endif
49512+#ifdef CONFIG_GRKERNSEC_LINK
49513+ {
49514+ .procname = "linking_restrictions",
49515+ .data = &grsec_enable_link,
49516+ .maxlen = sizeof(int),
49517+ .mode = 0600,
49518+ .proc_handler = &proc_dointvec,
49519+ },
49520+#endif
49521+#ifdef CONFIG_GRKERNSEC_BRUTE
49522+ {
49523+ .procname = "deter_bruteforce",
49524+ .data = &grsec_enable_brute,
49525+ .maxlen = sizeof(int),
49526+ .mode = 0600,
49527+ .proc_handler = &proc_dointvec,
49528+ },
49529+#endif
49530+#ifdef CONFIG_GRKERNSEC_FIFO
49531+ {
49532+ .procname = "fifo_restrictions",
49533+ .data = &grsec_enable_fifo,
49534+ .maxlen = sizeof(int),
49535+ .mode = 0600,
49536+ .proc_handler = &proc_dointvec,
49537+ },
49538+#endif
49539+#ifdef CONFIG_GRKERNSEC_EXECVE
49540+ {
49541+ .procname = "execve_limiting",
49542+ .data = &grsec_enable_execve,
49543+ .maxlen = sizeof(int),
49544+ .mode = 0600,
49545+ .proc_handler = &proc_dointvec,
49546+ },
49547+#endif
49548+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49549+ {
49550+ .procname = "ip_blackhole",
49551+ .data = &grsec_enable_blackhole,
49552+ .maxlen = sizeof(int),
49553+ .mode = 0600,
49554+ .proc_handler = &proc_dointvec,
49555+ },
49556+ {
49557+ .procname = "lastack_retries",
49558+ .data = &grsec_lastack_retries,
49559+ .maxlen = sizeof(int),
49560+ .mode = 0600,
49561+ .proc_handler = &proc_dointvec,
49562+ },
49563+#endif
49564+#ifdef CONFIG_GRKERNSEC_EXECLOG
49565+ {
49566+ .procname = "exec_logging",
49567+ .data = &grsec_enable_execlog,
49568+ .maxlen = sizeof(int),
49569+ .mode = 0600,
49570+ .proc_handler = &proc_dointvec,
49571+ },
49572+#endif
49573+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49574+ {
49575+ .procname = "rwxmap_logging",
49576+ .data = &grsec_enable_log_rwxmaps,
49577+ .maxlen = sizeof(int),
49578+ .mode = 0600,
49579+ .proc_handler = &proc_dointvec,
49580+ },
49581+#endif
49582+#ifdef CONFIG_GRKERNSEC_SIGNAL
49583+ {
49584+ .procname = "signal_logging",
49585+ .data = &grsec_enable_signal,
49586+ .maxlen = sizeof(int),
49587+ .mode = 0600,
49588+ .proc_handler = &proc_dointvec,
49589+ },
49590+#endif
49591+#ifdef CONFIG_GRKERNSEC_FORKFAIL
49592+ {
49593+ .procname = "forkfail_logging",
49594+ .data = &grsec_enable_forkfail,
49595+ .maxlen = sizeof(int),
49596+ .mode = 0600,
49597+ .proc_handler = &proc_dointvec,
49598+ },
49599+#endif
49600+#ifdef CONFIG_GRKERNSEC_TIME
49601+ {
49602+ .procname = "timechange_logging",
49603+ .data = &grsec_enable_time,
49604+ .maxlen = sizeof(int),
49605+ .mode = 0600,
49606+ .proc_handler = &proc_dointvec,
49607+ },
49608+#endif
49609+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49610+ {
49611+ .procname = "chroot_deny_shmat",
49612+ .data = &grsec_enable_chroot_shmat,
49613+ .maxlen = sizeof(int),
49614+ .mode = 0600,
49615+ .proc_handler = &proc_dointvec,
49616+ },
49617+#endif
49618+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49619+ {
49620+ .procname = "chroot_deny_unix",
49621+ .data = &grsec_enable_chroot_unix,
49622+ .maxlen = sizeof(int),
49623+ .mode = 0600,
49624+ .proc_handler = &proc_dointvec,
49625+ },
49626+#endif
49627+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49628+ {
49629+ .procname = "chroot_deny_mount",
49630+ .data = &grsec_enable_chroot_mount,
49631+ .maxlen = sizeof(int),
49632+ .mode = 0600,
49633+ .proc_handler = &proc_dointvec,
49634+ },
49635+#endif
49636+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49637+ {
49638+ .procname = "chroot_deny_fchdir",
49639+ .data = &grsec_enable_chroot_fchdir,
49640+ .maxlen = sizeof(int),
49641+ .mode = 0600,
49642+ .proc_handler = &proc_dointvec,
49643+ },
49644+#endif
49645+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49646+ {
49647+ .procname = "chroot_deny_chroot",
49648+ .data = &grsec_enable_chroot_double,
49649+ .maxlen = sizeof(int),
49650+ .mode = 0600,
49651+ .proc_handler = &proc_dointvec,
49652+ },
49653+#endif
49654+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49655+ {
49656+ .procname = "chroot_deny_pivot",
49657+ .data = &grsec_enable_chroot_pivot,
49658+ .maxlen = sizeof(int),
49659+ .mode = 0600,
49660+ .proc_handler = &proc_dointvec,
49661+ },
49662+#endif
49663+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49664+ {
49665+ .procname = "chroot_enforce_chdir",
49666+ .data = &grsec_enable_chroot_chdir,
49667+ .maxlen = sizeof(int),
49668+ .mode = 0600,
49669+ .proc_handler = &proc_dointvec,
49670+ },
49671+#endif
49672+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49673+ {
49674+ .procname = "chroot_deny_chmod",
49675+ .data = &grsec_enable_chroot_chmod,
49676+ .maxlen = sizeof(int),
49677+ .mode = 0600,
49678+ .proc_handler = &proc_dointvec,
49679+ },
49680+#endif
49681+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49682+ {
49683+ .procname = "chroot_deny_mknod",
49684+ .data = &grsec_enable_chroot_mknod,
49685+ .maxlen = sizeof(int),
49686+ .mode = 0600,
49687+ .proc_handler = &proc_dointvec,
49688+ },
49689+#endif
49690+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49691+ {
49692+ .procname = "chroot_restrict_nice",
49693+ .data = &grsec_enable_chroot_nice,
49694+ .maxlen = sizeof(int),
49695+ .mode = 0600,
49696+ .proc_handler = &proc_dointvec,
49697+ },
49698+#endif
49699+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49700+ {
49701+ .procname = "chroot_execlog",
49702+ .data = &grsec_enable_chroot_execlog,
49703+ .maxlen = sizeof(int),
49704+ .mode = 0600,
49705+ .proc_handler = &proc_dointvec,
49706+ },
49707+#endif
49708+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49709+ {
49710+ .procname = "chroot_caps",
49711+ .data = &grsec_enable_chroot_caps,
49712+ .maxlen = sizeof(int),
49713+ .mode = 0600,
49714+ .proc_handler = &proc_dointvec,
49715+ },
49716+#endif
49717+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49718+ {
49719+ .procname = "chroot_deny_sysctl",
49720+ .data = &grsec_enable_chroot_sysctl,
49721+ .maxlen = sizeof(int),
49722+ .mode = 0600,
49723+ .proc_handler = &proc_dointvec,
49724+ },
49725+#endif
49726+#ifdef CONFIG_GRKERNSEC_TPE
49727+ {
49728+ .procname = "tpe",
49729+ .data = &grsec_enable_tpe,
49730+ .maxlen = sizeof(int),
49731+ .mode = 0600,
49732+ .proc_handler = &proc_dointvec,
49733+ },
49734+ {
49735+ .procname = "tpe_gid",
49736+ .data = &grsec_tpe_gid,
49737+ .maxlen = sizeof(int),
49738+ .mode = 0600,
49739+ .proc_handler = &proc_dointvec,
49740+ },
49741+#endif
49742+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49743+ {
49744+ .procname = "tpe_invert",
49745+ .data = &grsec_enable_tpe_invert,
49746+ .maxlen = sizeof(int),
49747+ .mode = 0600,
49748+ .proc_handler = &proc_dointvec,
49749+ },
49750+#endif
49751+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49752+ {
49753+ .procname = "tpe_restrict_all",
49754+ .data = &grsec_enable_tpe_all,
49755+ .maxlen = sizeof(int),
49756+ .mode = 0600,
49757+ .proc_handler = &proc_dointvec,
49758+ },
49759+#endif
49760+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49761+ {
49762+ .procname = "socket_all",
49763+ .data = &grsec_enable_socket_all,
49764+ .maxlen = sizeof(int),
49765+ .mode = 0600,
49766+ .proc_handler = &proc_dointvec,
49767+ },
49768+ {
49769+ .procname = "socket_all_gid",
49770+ .data = &grsec_socket_all_gid,
49771+ .maxlen = sizeof(int),
49772+ .mode = 0600,
49773+ .proc_handler = &proc_dointvec,
49774+ },
49775+#endif
49776+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49777+ {
49778+ .procname = "socket_client",
49779+ .data = &grsec_enable_socket_client,
49780+ .maxlen = sizeof(int),
49781+ .mode = 0600,
49782+ .proc_handler = &proc_dointvec,
49783+ },
49784+ {
49785+ .procname = "socket_client_gid",
49786+ .data = &grsec_socket_client_gid,
49787+ .maxlen = sizeof(int),
49788+ .mode = 0600,
49789+ .proc_handler = &proc_dointvec,
49790+ },
49791+#endif
49792+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49793+ {
49794+ .procname = "socket_server",
49795+ .data = &grsec_enable_socket_server,
49796+ .maxlen = sizeof(int),
49797+ .mode = 0600,
49798+ .proc_handler = &proc_dointvec,
49799+ },
49800+ {
49801+ .procname = "socket_server_gid",
49802+ .data = &grsec_socket_server_gid,
49803+ .maxlen = sizeof(int),
49804+ .mode = 0600,
49805+ .proc_handler = &proc_dointvec,
49806+ },
49807+#endif
49808+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49809+ {
49810+ .procname = "audit_group",
49811+ .data = &grsec_enable_group,
49812+ .maxlen = sizeof(int),
49813+ .mode = 0600,
49814+ .proc_handler = &proc_dointvec,
49815+ },
49816+ {
49817+ .procname = "audit_gid",
49818+ .data = &grsec_audit_gid,
49819+ .maxlen = sizeof(int),
49820+ .mode = 0600,
49821+ .proc_handler = &proc_dointvec,
49822+ },
49823+#endif
49824+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49825+ {
49826+ .procname = "audit_chdir",
49827+ .data = &grsec_enable_chdir,
49828+ .maxlen = sizeof(int),
49829+ .mode = 0600,
49830+ .proc_handler = &proc_dointvec,
49831+ },
49832+#endif
49833+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49834+ {
49835+ .procname = "audit_mount",
49836+ .data = &grsec_enable_mount,
49837+ .maxlen = sizeof(int),
49838+ .mode = 0600,
49839+ .proc_handler = &proc_dointvec,
49840+ },
49841+#endif
49842+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49843+ {
49844+ .procname = "audit_textrel",
49845+ .data = &grsec_enable_audit_textrel,
49846+ .maxlen = sizeof(int),
49847+ .mode = 0600,
49848+ .proc_handler = &proc_dointvec,
49849+ },
49850+#endif
49851+#ifdef CONFIG_GRKERNSEC_DMESG
49852+ {
49853+ .procname = "dmesg",
49854+ .data = &grsec_enable_dmesg,
49855+ .maxlen = sizeof(int),
49856+ .mode = 0600,
49857+ .proc_handler = &proc_dointvec,
49858+ },
49859+#endif
49860+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49861+ {
49862+ .procname = "chroot_findtask",
49863+ .data = &grsec_enable_chroot_findtask,
49864+ .maxlen = sizeof(int),
49865+ .mode = 0600,
49866+ .proc_handler = &proc_dointvec,
49867+ },
49868+#endif
49869+#ifdef CONFIG_GRKERNSEC_RESLOG
49870+ {
49871+ .procname = "resource_logging",
49872+ .data = &grsec_resource_logging,
49873+ .maxlen = sizeof(int),
49874+ .mode = 0600,
49875+ .proc_handler = &proc_dointvec,
49876+ },
49877+#endif
49878+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49879+ {
49880+ .procname = "audit_ptrace",
49881+ .data = &grsec_enable_audit_ptrace,
49882+ .maxlen = sizeof(int),
49883+ .mode = 0600,
49884+ .proc_handler = &proc_dointvec,
49885+ },
49886+#endif
49887+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49888+ {
49889+ .procname = "harden_ptrace",
49890+ .data = &grsec_enable_harden_ptrace,
49891+ .maxlen = sizeof(int),
49892+ .mode = 0600,
49893+ .proc_handler = &proc_dointvec,
49894+ },
49895+#endif
49896+ {
49897+ .procname = "grsec_lock",
49898+ .data = &grsec_lock,
49899+ .maxlen = sizeof(int),
49900+ .mode = 0600,
49901+ .proc_handler = &proc_dointvec,
49902+ },
49903+#endif
49904+#ifdef CONFIG_GRKERNSEC_ROFS
49905+ {
49906+ .procname = "romount_protect",
49907+ .data = &grsec_enable_rofs,
49908+ .maxlen = sizeof(int),
49909+ .mode = 0600,
49910+ .proc_handler = &proc_dointvec_minmax,
49911+ .extra1 = &one,
49912+ .extra2 = &one,
49913+ },
49914+#endif
49915+ { }
49916+};
49917+#endif
49918diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49919--- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49920+++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49921@@ -0,0 +1,16 @@
49922+#include <linux/kernel.h>
49923+#include <linux/sched.h>
49924+#include <linux/grinternal.h>
49925+#include <linux/module.h>
49926+
49927+void
49928+gr_log_timechange(void)
49929+{
49930+#ifdef CONFIG_GRKERNSEC_TIME
49931+ if (grsec_enable_time)
49932+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49933+#endif
49934+ return;
49935+}
49936+
49937+EXPORT_SYMBOL(gr_log_timechange);
49938diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49939--- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49940+++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49941@@ -0,0 +1,39 @@
49942+#include <linux/kernel.h>
49943+#include <linux/sched.h>
49944+#include <linux/file.h>
49945+#include <linux/fs.h>
49946+#include <linux/grinternal.h>
49947+
49948+extern int gr_acl_tpe_check(void);
49949+
49950+int
49951+gr_tpe_allow(const struct file *file)
49952+{
49953+#ifdef CONFIG_GRKERNSEC
49954+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49955+ const struct cred *cred = current_cred();
49956+
49957+ if (cred->uid && ((grsec_enable_tpe &&
49958+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49959+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49960+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49961+#else
49962+ in_group_p(grsec_tpe_gid)
49963+#endif
49964+ ) || gr_acl_tpe_check()) &&
49965+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49966+ (inode->i_mode & S_IWOTH))))) {
49967+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49968+ return 0;
49969+ }
49970+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49971+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49972+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49973+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49974+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49975+ return 0;
49976+ }
49977+#endif
49978+#endif
49979+ return 1;
49980+}
49981diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
49982--- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49983+++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
49984@@ -0,0 +1,61 @@
49985+#include <linux/err.h>
49986+#include <linux/kernel.h>
49987+#include <linux/sched.h>
49988+#include <linux/mm.h>
49989+#include <linux/scatterlist.h>
49990+#include <linux/crypto.h>
49991+#include <linux/gracl.h>
49992+
49993+
49994+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49995+#error "crypto and sha256 must be built into the kernel"
49996+#endif
49997+
49998+int
49999+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
50000+{
50001+ char *p;
50002+ struct crypto_hash *tfm;
50003+ struct hash_desc desc;
50004+ struct scatterlist sg;
50005+ unsigned char temp_sum[GR_SHA_LEN];
50006+ volatile int retval = 0;
50007+ volatile int dummy = 0;
50008+ unsigned int i;
50009+
50010+ sg_init_table(&sg, 1);
50011+
50012+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
50013+ if (IS_ERR(tfm)) {
50014+ /* should never happen, since sha256 should be built in */
50015+ return 1;
50016+ }
50017+
50018+ desc.tfm = tfm;
50019+ desc.flags = 0;
50020+
50021+ crypto_hash_init(&desc);
50022+
50023+ p = salt;
50024+ sg_set_buf(&sg, p, GR_SALT_LEN);
50025+ crypto_hash_update(&desc, &sg, sg.length);
50026+
50027+ p = entry->pw;
50028+ sg_set_buf(&sg, p, strlen(p));
50029+
50030+ crypto_hash_update(&desc, &sg, sg.length);
50031+
50032+ crypto_hash_final(&desc, temp_sum);
50033+
50034+ memset(entry->pw, 0, GR_PW_LEN);
50035+
50036+ for (i = 0; i < GR_SHA_LEN; i++)
50037+ if (sum[i] != temp_sum[i])
50038+ retval = 1;
50039+ else
50040+ dummy = 1; // waste a cycle
50041+
50042+ crypto_free_hash(tfm);
50043+
50044+ return retval;
50045+}
50046diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
50047--- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
50048+++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-05 19:44:37.000000000 -0400
50049@@ -0,0 +1,1048 @@
50050+#
50051+# grecurity configuration
50052+#
50053+
50054+menu "Grsecurity"
50055+
50056+config GRKERNSEC
50057+ bool "Grsecurity"
50058+ select CRYPTO
50059+ select CRYPTO_SHA256
50060+ help
50061+ If you say Y here, you will be able to configure many features
50062+ that will enhance the security of your system. It is highly
50063+ recommended that you say Y here and read through the help
50064+ for each option so that you fully understand the features and
50065+ can evaluate their usefulness for your machine.
50066+
50067+choice
50068+ prompt "Security Level"
50069+ depends on GRKERNSEC
50070+ default GRKERNSEC_CUSTOM
50071+
50072+config GRKERNSEC_LOW
50073+ bool "Low"
50074+ select GRKERNSEC_LINK
50075+ select GRKERNSEC_FIFO
50076+ select GRKERNSEC_EXECVE
50077+ select GRKERNSEC_RANDNET
50078+ select GRKERNSEC_DMESG
50079+ select GRKERNSEC_CHROOT
50080+ select GRKERNSEC_CHROOT_CHDIR
50081+
50082+ help
50083+ If you choose this option, several of the grsecurity options will
50084+ be enabled that will give you greater protection against a number
50085+ of attacks, while assuring that none of your software will have any
50086+ conflicts with the additional security measures. If you run a lot
50087+ of unusual software, or you are having problems with the higher
50088+ security levels, you should say Y here. With this option, the
50089+ following features are enabled:
50090+
50091+ - Linking restrictions
50092+ - FIFO restrictions
50093+ - Enforcing RLIMIT_NPROC on execve
50094+ - Restricted dmesg
50095+ - Enforced chdir("/") on chroot
50096+ - Runtime module disabling
50097+
50098+config GRKERNSEC_MEDIUM
50099+ bool "Medium"
50100+ select PAX
50101+ select PAX_EI_PAX
50102+ select PAX_PT_PAX_FLAGS
50103+ select PAX_HAVE_ACL_FLAGS
50104+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50105+ select GRKERNSEC_CHROOT
50106+ select GRKERNSEC_CHROOT_SYSCTL
50107+ select GRKERNSEC_LINK
50108+ select GRKERNSEC_FIFO
50109+ select GRKERNSEC_EXECVE
50110+ select GRKERNSEC_DMESG
50111+ select GRKERNSEC_RANDNET
50112+ select GRKERNSEC_FORKFAIL
50113+ select GRKERNSEC_TIME
50114+ select GRKERNSEC_SIGNAL
50115+ select GRKERNSEC_CHROOT
50116+ select GRKERNSEC_CHROOT_UNIX
50117+ select GRKERNSEC_CHROOT_MOUNT
50118+ select GRKERNSEC_CHROOT_PIVOT
50119+ select GRKERNSEC_CHROOT_DOUBLE
50120+ select GRKERNSEC_CHROOT_CHDIR
50121+ select GRKERNSEC_CHROOT_MKNOD
50122+ select GRKERNSEC_PROC
50123+ select GRKERNSEC_PROC_USERGROUP
50124+ select PAX_RANDUSTACK
50125+ select PAX_ASLR
50126+ select PAX_RANDMMAP
50127+ select PAX_REFCOUNT if (X86 || SPARC64)
50128+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50129+
50130+ help
50131+ If you say Y here, several features in addition to those included
50132+ in the low additional security level will be enabled. These
50133+ features provide even more security to your system, though in rare
50134+ cases they may be incompatible with very old or poorly written
50135+ software. If you enable this option, make sure that your auth
50136+ service (identd) is running as gid 1001. With this option,
50137+ the following features (in addition to those provided in the
50138+ low additional security level) will be enabled:
50139+
50140+ - Failed fork logging
50141+ - Time change logging
50142+ - Signal logging
50143+ - Deny mounts in chroot
50144+ - Deny double chrooting
50145+ - Deny sysctl writes in chroot
50146+ - Deny mknod in chroot
50147+ - Deny access to abstract AF_UNIX sockets out of chroot
50148+ - Deny pivot_root in chroot
50149+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50150+ - /proc restrictions with special GID set to 10 (usually wheel)
50151+ - Address Space Layout Randomization (ASLR)
50152+ - Prevent exploitation of most refcount overflows
50153+ - Bounds checking of copying between the kernel and userland
50154+
50155+config GRKERNSEC_HIGH
50156+ bool "High"
50157+ select GRKERNSEC_LINK
50158+ select GRKERNSEC_FIFO
50159+ select GRKERNSEC_EXECVE
50160+ select GRKERNSEC_DMESG
50161+ select GRKERNSEC_FORKFAIL
50162+ select GRKERNSEC_TIME
50163+ select GRKERNSEC_SIGNAL
50164+ select GRKERNSEC_CHROOT
50165+ select GRKERNSEC_CHROOT_SHMAT
50166+ select GRKERNSEC_CHROOT_UNIX
50167+ select GRKERNSEC_CHROOT_MOUNT
50168+ select GRKERNSEC_CHROOT_FCHDIR
50169+ select GRKERNSEC_CHROOT_PIVOT
50170+ select GRKERNSEC_CHROOT_DOUBLE
50171+ select GRKERNSEC_CHROOT_CHDIR
50172+ select GRKERNSEC_CHROOT_MKNOD
50173+ select GRKERNSEC_CHROOT_CAPS
50174+ select GRKERNSEC_CHROOT_SYSCTL
50175+ select GRKERNSEC_CHROOT_FINDTASK
50176+ select GRKERNSEC_SYSFS_RESTRICT
50177+ select GRKERNSEC_PROC
50178+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50179+ select GRKERNSEC_HIDESYM
50180+ select GRKERNSEC_BRUTE
50181+ select GRKERNSEC_PROC_USERGROUP
50182+ select GRKERNSEC_KMEM
50183+ select GRKERNSEC_RESLOG
50184+ select GRKERNSEC_RANDNET
50185+ select GRKERNSEC_PROC_ADD
50186+ select GRKERNSEC_CHROOT_CHMOD
50187+ select GRKERNSEC_CHROOT_NICE
50188+ select GRKERNSEC_AUDIT_MOUNT
50189+ select GRKERNSEC_MODHARDEN if (MODULES)
50190+ select GRKERNSEC_HARDEN_PTRACE
50191+ select GRKERNSEC_VM86 if (X86_32)
50192+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50193+ select PAX
50194+ select PAX_RANDUSTACK
50195+ select PAX_ASLR
50196+ select PAX_RANDMMAP
50197+ select PAX_NOEXEC
50198+ select PAX_MPROTECT
50199+ select PAX_EI_PAX
50200+ select PAX_PT_PAX_FLAGS
50201+ select PAX_HAVE_ACL_FLAGS
50202+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50203+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50204+ select PAX_RANDKSTACK if (X86_TSC && X86)
50205+ select PAX_SEGMEXEC if (X86_32)
50206+ select PAX_PAGEEXEC
50207+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50208+ select PAX_EMUTRAMP if (PARISC)
50209+ select PAX_EMUSIGRT if (PARISC)
50210+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50211+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50212+ select PAX_REFCOUNT if (X86 || SPARC64)
50213+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50214+ help
50215+ If you say Y here, many of the features of grsecurity will be
50216+ enabled, which will protect you against many kinds of attacks
50217+ against your system. The heightened security comes at a cost
50218+ of an increased chance of incompatibilities with rare software
50219+ on your machine. Since this security level enables PaX, you should
50220+ view <http://pax.grsecurity.net> and read about the PaX
50221+ project. While you are there, download chpax and run it on
50222+ binaries that cause problems with PaX. Also remember that
50223+ since the /proc restrictions are enabled, you must run your
50224+ identd as gid 1001. This security level enables the following
50225+ features in addition to those listed in the low and medium
50226+ security levels:
50227+
50228+ - Additional /proc restrictions
50229+ - Chmod restrictions in chroot
50230+ - No signals, ptrace, or viewing of processes outside of chroot
50231+ - Capability restrictions in chroot
50232+ - Deny fchdir out of chroot
50233+ - Priority restrictions in chroot
50234+ - Segmentation-based implementation of PaX
50235+ - Mprotect restrictions
50236+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50237+ - Kernel stack randomization
50238+ - Mount/unmount/remount logging
50239+ - Kernel symbol hiding
50240+ - Prevention of memory exhaustion-based exploits
50241+ - Hardening of module auto-loading
50242+ - Ptrace restrictions
50243+ - Restricted vm86 mode
50244+ - Restricted sysfs/debugfs
50245+ - Active kernel exploit response
50246+
50247+config GRKERNSEC_CUSTOM
50248+ bool "Custom"
50249+ help
50250+ If you say Y here, you will be able to configure every grsecurity
50251+ option, which allows you to enable many more features that aren't
50252+ covered in the basic security levels. These additional features
50253+ include TPE, socket restrictions, and the sysctl system for
50254+ grsecurity. It is advised that you read through the help for
50255+ each option to determine its usefulness in your situation.
50256+
50257+endchoice
50258+
50259+menu "Address Space Protection"
50260+depends on GRKERNSEC
50261+
50262+config GRKERNSEC_KMEM
50263+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50264+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50265+ help
50266+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50267+ be written to via mmap or otherwise to modify the running kernel.
50268+ /dev/port will also not be allowed to be opened. If you have module
50269+ support disabled, enabling this will close up four ways that are
50270+ currently used to insert malicious code into the running kernel.
50271+ Even with all these features enabled, we still highly recommend that
50272+ you use the RBAC system, as it is still possible for an attacker to
50273+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50274+ If you are not using XFree86, you may be able to stop this additional
50275+ case by enabling the 'Disable privileged I/O' option. Though nothing
50276+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50277+ but only to video memory, which is the only writing we allow in this
50278+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50279+ not be allowed to mprotect it with PROT_WRITE later.
50280+ It is highly recommended that you say Y here if you meet all the
50281+ conditions above.
50282+
50283+config GRKERNSEC_VM86
50284+ bool "Restrict VM86 mode"
50285+ depends on X86_32
50286+
50287+ help
50288+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50289+ make use of a special execution mode on 32bit x86 processors called
50290+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50291+ video cards and will still work with this option enabled. The purpose
50292+ of the option is to prevent exploitation of emulation errors in
50293+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50294+ Nearly all users should be able to enable this option.
50295+
50296+config GRKERNSEC_IO
50297+ bool "Disable privileged I/O"
50298+ depends on X86
50299+ select RTC_CLASS
50300+ select RTC_INTF_DEV
50301+ select RTC_DRV_CMOS
50302+
50303+ help
50304+ If you say Y here, all ioperm and iopl calls will return an error.
50305+ Ioperm and iopl can be used to modify the running kernel.
50306+ Unfortunately, some programs need this access to operate properly,
50307+ the most notable of which are XFree86 and hwclock. hwclock can be
50308+ remedied by having RTC support in the kernel, so real-time
50309+ clock support is enabled if this option is enabled, to ensure
50310+ that hwclock operates correctly. XFree86 still will not
50311+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50312+ IF YOU USE XFree86. If you use XFree86 and you still want to
50313+ protect your kernel against modification, use the RBAC system.
50314+
50315+config GRKERNSEC_PROC_MEMMAP
50316+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50317+ default y if (PAX_NOEXEC || PAX_ASLR)
50318+ depends on PAX_NOEXEC || PAX_ASLR
50319+ help
50320+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50321+ give no information about the addresses of its mappings if
50322+ PaX features that rely on random addresses are enabled on the task.
50323+ If you use PaX it is greatly recommended that you say Y here as it
50324+ closes up a hole that makes the full ASLR useless for suid
50325+ binaries.
50326+
50327+config GRKERNSEC_BRUTE
50328+ bool "Deter exploit bruteforcing"
50329+ help
50330+ If you say Y here, attempts to bruteforce exploits against forking
50331+ daemons such as apache or sshd, as well as against suid/sgid binaries
50332+ will be deterred. When a child of a forking daemon is killed by PaX
50333+ or crashes due to an illegal instruction or other suspicious signal,
50334+ the parent process will be delayed 30 seconds upon every subsequent
50335+ fork until the administrator is able to assess the situation and
50336+ restart the daemon.
50337+ In the suid/sgid case, the attempt is logged, the user has all their
50338+ processes terminated, and they are prevented from executing any further
50339+ processes for 15 minutes.
50340+ It is recommended that you also enable signal logging in the auditing
50341+ section so that logs are generated when a process triggers a suspicious
50342+ signal.
50343+ If the sysctl option is enabled, a sysctl option with name
50344+ "deter_bruteforce" is created.
50345+
50346+
50347+config GRKERNSEC_MODHARDEN
50348+ bool "Harden module auto-loading"
50349+ depends on MODULES
50350+ help
50351+ If you say Y here, module auto-loading in response to use of some
50352+ feature implemented by an unloaded module will be restricted to
50353+ root users. Enabling this option helps defend against attacks
50354+ by unprivileged users who abuse the auto-loading behavior to
50355+ cause a vulnerable module to load that is then exploited.
50356+
50357+ If this option prevents a legitimate use of auto-loading for a
50358+ non-root user, the administrator can execute modprobe manually
50359+ with the exact name of the module mentioned in the alert log.
50360+ Alternatively, the administrator can add the module to the list
50361+ of modules loaded at boot by modifying init scripts.
50362+
50363+ Modification of init scripts will most likely be needed on
50364+ Ubuntu servers with encrypted home directory support enabled,
50365+ as the first non-root user logging in will cause the ecb(aes),
50366+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50367+
50368+config GRKERNSEC_HIDESYM
50369+ bool "Hide kernel symbols"
50370+ help
50371+ If you say Y here, getting information on loaded modules, and
50372+ displaying all kernel symbols through a syscall will be restricted
50373+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50374+ /proc/kallsyms will be restricted to the root user. The RBAC
50375+ system can hide that entry even from root.
50376+
50377+ This option also prevents leaking of kernel addresses through
50378+ several /proc entries.
50379+
50380+ Note that this option is only effective provided the following
50381+ conditions are met:
50382+ 1) The kernel using grsecurity is not precompiled by some distribution
50383+ 2) You have also enabled GRKERNSEC_DMESG
50384+ 3) You are using the RBAC system and hiding other files such as your
50385+ kernel image and System.map. Alternatively, enabling this option
50386+ causes the permissions on /boot, /lib/modules, and the kernel
50387+ source directory to change at compile time to prevent
50388+ reading by non-root users.
50389+ If the above conditions are met, this option will aid in providing a
50390+ useful protection against local kernel exploitation of overflows
50391+ and arbitrary read/write vulnerabilities.
50392+
50393+config GRKERNSEC_KERN_LOCKOUT
50394+ bool "Active kernel exploit response"
50395+ depends on X86 || ARM || PPC || SPARC
50396+ help
50397+ If you say Y here, when a PaX alert is triggered due to suspicious
50398+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50399+ or an OOPs occurs due to bad memory accesses, instead of just
50400+ terminating the offending process (and potentially allowing
50401+ a subsequent exploit from the same user), we will take one of two
50402+ actions:
50403+ If the user was root, we will panic the system
50404+ If the user was non-root, we will log the attempt, terminate
50405+ all processes owned by the user, then prevent them from creating
50406+ any new processes until the system is restarted
50407+ This deters repeated kernel exploitation/bruteforcing attempts
50408+ and is useful for later forensics.
50409+
50410+endmenu
50411+menu "Role Based Access Control Options"
50412+depends on GRKERNSEC
50413+
50414+config GRKERNSEC_RBAC_DEBUG
50415+ bool
50416+
50417+config GRKERNSEC_NO_RBAC
50418+ bool "Disable RBAC system"
50419+ help
50420+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50421+ preventing the RBAC system from being enabled. You should only say Y
50422+ here if you have no intention of using the RBAC system, so as to prevent
50423+ an attacker with root access from misusing the RBAC system to hide files
50424+ and processes when loadable module support and /dev/[k]mem have been
50425+ locked down.
50426+
50427+config GRKERNSEC_ACL_HIDEKERN
50428+ bool "Hide kernel processes"
50429+ help
50430+ If you say Y here, all kernel threads will be hidden to all
50431+ processes but those whose subject has the "view hidden processes"
50432+ flag.
50433+
50434+config GRKERNSEC_ACL_MAXTRIES
50435+ int "Maximum tries before password lockout"
50436+ default 3
50437+ help
50438+ This option enforces the maximum number of times a user can attempt
50439+ to authorize themselves with the grsecurity RBAC system before being
50440+ denied the ability to attempt authorization again for a specified time.
50441+ The lower the number, the harder it will be to brute-force a password.
50442+
50443+config GRKERNSEC_ACL_TIMEOUT
50444+ int "Time to wait after max password tries, in seconds"
50445+ default 30
50446+ help
50447+ This option specifies the time the user must wait after attempting to
50448+ authorize to the RBAC system with the maximum number of invalid
50449+ passwords. The higher the number, the harder it will be to brute-force
50450+ a password.
50451+
50452+endmenu
50453+menu "Filesystem Protections"
50454+depends on GRKERNSEC
50455+
50456+config GRKERNSEC_PROC
50457+ bool "Proc restrictions"
50458+ help
50459+ If you say Y here, the permissions of the /proc filesystem
50460+ will be altered to enhance system security and privacy. You MUST
50461+ choose either a user only restriction or a user and group restriction.
50462+ Depending upon the option you choose, you can either restrict users to
50463+ see only the processes they themselves run, or choose a group that can
50464+ view all processes and files normally restricted to root if you choose
50465+ the "restrict to user only" option. NOTE: If you're running identd as
50466+ a non-root user, you will have to run it as the group you specify here.
50467+
50468+config GRKERNSEC_PROC_USER
50469+ bool "Restrict /proc to user only"
50470+ depends on GRKERNSEC_PROC
50471+ help
50472+ If you say Y here, non-root users will only be able to view their own
50473+ processes, and restricts them from viewing network-related information,
50474+ and viewing kernel symbol and module information.
50475+
50476+config GRKERNSEC_PROC_USERGROUP
50477+ bool "Allow special group"
50478+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50479+ help
50480+ If you say Y here, you will be able to select a group that will be
50481+ able to view all processes and network-related information. If you've
50482+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50483+ remain hidden. This option is useful if you want to run identd as
50484+ a non-root user.
50485+
50486+config GRKERNSEC_PROC_GID
50487+ int "GID for special group"
50488+ depends on GRKERNSEC_PROC_USERGROUP
50489+ default 1001
50490+
50491+config GRKERNSEC_PROC_ADD
50492+ bool "Additional restrictions"
50493+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50494+ help
50495+ If you say Y here, additional restrictions will be placed on
50496+ /proc that keep normal users from viewing device information and
50497+ slabinfo information that could be useful for exploits.
50498+
50499+config GRKERNSEC_LINK
50500+ bool "Linking restrictions"
50501+ help
50502+ If you say Y here, /tmp race exploits will be prevented, since users
50503+ will no longer be able to follow symlinks owned by other users in
50504+ world-writable +t directories (e.g. /tmp), unless the owner of the
50505+ symlink is the owner of the directory. users will also not be
50506+ able to hardlink to files they do not own. If the sysctl option is
50507+ enabled, a sysctl option with name "linking_restrictions" is created.
50508+
50509+config GRKERNSEC_FIFO
50510+ bool "FIFO restrictions"
50511+ help
50512+ If you say Y here, users will not be able to write to FIFOs they don't
50513+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50514+ the FIFO is the same owner of the directory it's held in. If the sysctl
50515+ option is enabled, a sysctl option with name "fifo_restrictions" is
50516+ created.
50517+
50518+config GRKERNSEC_SYSFS_RESTRICT
50519+ bool "Sysfs/debugfs restriction"
50520+ depends on SYSFS
50521+ help
50522+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50523+ any filesystem normally mounted under it (e.g. debugfs) will only
50524+ be accessible by root. These filesystems generally provide access
50525+ to hardware and debug information that isn't appropriate for unprivileged
50526+ users of the system. Sysfs and debugfs have also become a large source
50527+ of new vulnerabilities, ranging from infoleaks to local compromise.
50528+ There has been very little oversight with an eye toward security involved
50529+ in adding new exporters of information to these filesystems, so their
50530+ use is discouraged.
50531+ This option is equivalent to a chmod 0700 of the mount paths.
50532+
50533+config GRKERNSEC_ROFS
50534+ bool "Runtime read-only mount protection"
50535+ help
50536+ If you say Y here, a sysctl option with name "romount_protect" will
50537+ be created. By setting this option to 1 at runtime, filesystems
50538+ will be protected in the following ways:
50539+ * No new writable mounts will be allowed
50540+ * Existing read-only mounts won't be able to be remounted read/write
50541+ * Write operations will be denied on all block devices
50542+ This option acts independently of grsec_lock: once it is set to 1,
50543+ it cannot be turned off. Therefore, please be mindful of the resulting
50544+ behavior if this option is enabled in an init script on a read-only
50545+ filesystem. This feature is mainly intended for secure embedded systems.
50546+
50547+config GRKERNSEC_CHROOT
50548+ bool "Chroot jail restrictions"
50549+ help
50550+ If you say Y here, you will be able to choose several options that will
50551+ make breaking out of a chrooted jail much more difficult. If you
50552+ encounter no software incompatibilities with the following options, it
50553+ is recommended that you enable each one.
50554+
50555+config GRKERNSEC_CHROOT_MOUNT
50556+ bool "Deny mounts"
50557+ depends on GRKERNSEC_CHROOT
50558+ help
50559+ If you say Y here, processes inside a chroot will not be able to
50560+ mount or remount filesystems. If the sysctl option is enabled, a
50561+ sysctl option with name "chroot_deny_mount" is created.
50562+
50563+config GRKERNSEC_CHROOT_DOUBLE
50564+ bool "Deny double-chroots"
50565+ depends on GRKERNSEC_CHROOT
50566+ help
50567+ If you say Y here, processes inside a chroot will not be able to chroot
50568+ again outside the chroot. This is a widely used method of breaking
50569+ out of a chroot jail and should not be allowed. If the sysctl
50570+ option is enabled, a sysctl option with name
50571+ "chroot_deny_chroot" is created.
50572+
50573+config GRKERNSEC_CHROOT_PIVOT
50574+ bool "Deny pivot_root in chroot"
50575+ depends on GRKERNSEC_CHROOT
50576+ help
50577+ If you say Y here, processes inside a chroot will not be able to use
50578+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50579+ works similar to chroot in that it changes the root filesystem. This
50580+ function could be misused in a chrooted process to attempt to break out
50581+ of the chroot, and therefore should not be allowed. If the sysctl
50582+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50583+ created.
50584+
50585+config GRKERNSEC_CHROOT_CHDIR
50586+ bool "Enforce chdir(\"/\") on all chroots"
50587+ depends on GRKERNSEC_CHROOT
50588+ help
50589+ If you say Y here, the current working directory of all newly-chrooted
50590+ applications will be set to the root directory of the chroot.
50591+ The man page on chroot(2) states:
50592+ Note that this call does not change the current working
50593+ directory, so that `.' can be outside the tree rooted at
50594+ `/'. In particular, the super-user can escape from a
50595+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50596+
50597+ It is recommended that you say Y here, since it's not known to break
50598+ any software. If the sysctl option is enabled, a sysctl option with
50599+ name "chroot_enforce_chdir" is created.
50600+
50601+config GRKERNSEC_CHROOT_CHMOD
50602+ bool "Deny (f)chmod +s"
50603+ depends on GRKERNSEC_CHROOT
50604+ help
50605+ If you say Y here, processes inside a chroot will not be able to chmod
50606+ or fchmod files to make them have suid or sgid bits. This protects
50607+ against another published method of breaking a chroot. If the sysctl
50608+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50609+ created.
50610+
50611+config GRKERNSEC_CHROOT_FCHDIR
50612+ bool "Deny fchdir out of chroot"
50613+ depends on GRKERNSEC_CHROOT
50614+ help
50615+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50616+ to a file descriptor of the chrooting process that points to a directory
50617+ outside the filesystem will be stopped. If the sysctl option
50618+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50619+
50620+config GRKERNSEC_CHROOT_MKNOD
50621+ bool "Deny mknod"
50622+ depends on GRKERNSEC_CHROOT
50623+ help
50624+ If you say Y here, processes inside a chroot will not be allowed to
50625+ mknod. The problem with using mknod inside a chroot is that it
50626+ would allow an attacker to create a device entry that is the same
50627+ as one on the physical root of your system, which could range from
50628+ anything from the console device to a device for your harddrive (which
50629+ they could then use to wipe the drive or steal data). It is recommended
50630+ that you say Y here, unless you run into software incompatibilities.
50631+ If the sysctl option is enabled, a sysctl option with name
50632+ "chroot_deny_mknod" is created.
50633+
50634+config GRKERNSEC_CHROOT_SHMAT
50635+ bool "Deny shmat() out of chroot"
50636+ depends on GRKERNSEC_CHROOT
50637+ help
50638+ If you say Y here, processes inside a chroot will not be able to attach
50639+ to shared memory segments that were created outside of the chroot jail.
50640+ It is recommended that you say Y here. If the sysctl option is enabled,
50641+ a sysctl option with name "chroot_deny_shmat" is created.
50642+
50643+config GRKERNSEC_CHROOT_UNIX
50644+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50645+ depends on GRKERNSEC_CHROOT
50646+ help
50647+ If you say Y here, processes inside a chroot will not be able to
50648+ connect to abstract (meaning not belonging to a filesystem) Unix
50649+ domain sockets that were bound outside of a chroot. It is recommended
50650+ that you say Y here. If the sysctl option is enabled, a sysctl option
50651+ with name "chroot_deny_unix" is created.
50652+
50653+config GRKERNSEC_CHROOT_FINDTASK
50654+ bool "Protect outside processes"
50655+ depends on GRKERNSEC_CHROOT
50656+ help
50657+ If you say Y here, processes inside a chroot will not be able to
50658+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50659+ getsid, or view any process outside of the chroot. If the sysctl
50660+ option is enabled, a sysctl option with name "chroot_findtask" is
50661+ created.
50662+
50663+config GRKERNSEC_CHROOT_NICE
50664+ bool "Restrict priority changes"
50665+ depends on GRKERNSEC_CHROOT
50666+ help
50667+ If you say Y here, processes inside a chroot will not be able to raise
50668+ the priority of processes in the chroot, or alter the priority of
50669+ processes outside the chroot. This provides more security than simply
50670+ removing CAP_SYS_NICE from the process' capability set. If the
50671+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50672+ is created.
50673+
50674+config GRKERNSEC_CHROOT_SYSCTL
50675+ bool "Deny sysctl writes"
50676+ depends on GRKERNSEC_CHROOT
50677+ help
50678+ If you say Y here, an attacker in a chroot will not be able to
50679+ write to sysctl entries, either by sysctl(2) or through a /proc
50680+ interface. It is strongly recommended that you say Y here. If the
50681+ sysctl option is enabled, a sysctl option with name
50682+ "chroot_deny_sysctl" is created.
50683+
50684+config GRKERNSEC_CHROOT_CAPS
50685+ bool "Capability restrictions"
50686+ depends on GRKERNSEC_CHROOT
50687+ help
50688+ If you say Y here, the capabilities on all root processes within a
50689+ chroot jail will be lowered to stop module insertion, raw i/o,
50690+ system and net admin tasks, rebooting the system, modifying immutable
50691+ files, modifying IPC owned by another, and changing the system time.
50692+ This is left an option because it can break some apps. Disable this
50693+ if your chrooted apps are having problems performing those kinds of
50694+ tasks. If the sysctl option is enabled, a sysctl option with
50695+ name "chroot_caps" is created.
50696+
50697+endmenu
50698+menu "Kernel Auditing"
50699+depends on GRKERNSEC
50700+
50701+config GRKERNSEC_AUDIT_GROUP
50702+ bool "Single group for auditing"
50703+ help
50704+ If you say Y here, the exec, chdir, and (un)mount logging features
50705+ will only operate on a group you specify. This option is recommended
50706+ if you only want to watch certain users instead of having a large
50707+ amount of logs from the entire system. If the sysctl option is enabled,
50708+ a sysctl option with name "audit_group" is created.
50709+
50710+config GRKERNSEC_AUDIT_GID
50711+ int "GID for auditing"
50712+ depends on GRKERNSEC_AUDIT_GROUP
50713+ default 1007
50714+
50715+config GRKERNSEC_EXECLOG
50716+ bool "Exec logging"
50717+ help
50718+ If you say Y here, all execve() calls will be logged (since the
50719+ other exec*() calls are frontends to execve(), all execution
50720+ will be logged). Useful for shell-servers that like to keep track
50721+ of their users. If the sysctl option is enabled, a sysctl option with
50722+ name "exec_logging" is created.
50723+ WARNING: This option when enabled will produce a LOT of logs, especially
50724+ on an active system.
50725+
50726+config GRKERNSEC_RESLOG
50727+ bool "Resource logging"
50728+ help
50729+ If you say Y here, all attempts to overstep resource limits will
50730+ be logged with the resource name, the requested size, and the current
50731+ limit. It is highly recommended that you say Y here. If the sysctl
50732+ option is enabled, a sysctl option with name "resource_logging" is
50733+ created. If the RBAC system is enabled, the sysctl value is ignored.
50734+
50735+config GRKERNSEC_CHROOT_EXECLOG
50736+ bool "Log execs within chroot"
50737+ help
50738+ If you say Y here, all executions inside a chroot jail will be logged
50739+ to syslog. This can cause a large amount of logs if certain
50740+ applications (eg. djb's daemontools) are installed on the system, and
50741+ is therefore left as an option. If the sysctl option is enabled, a
50742+ sysctl option with name "chroot_execlog" is created.
50743+
50744+config GRKERNSEC_AUDIT_PTRACE
50745+ bool "Ptrace logging"
50746+ help
50747+ If you say Y here, all attempts to attach to a process via ptrace
50748+ will be logged. If the sysctl option is enabled, a sysctl option
50749+ with name "audit_ptrace" is created.
50750+
50751+config GRKERNSEC_AUDIT_CHDIR
50752+ bool "Chdir logging"
50753+ help
50754+ If you say Y here, all chdir() calls will be logged. If the sysctl
50755+ option is enabled, a sysctl option with name "audit_chdir" is created.
50756+
50757+config GRKERNSEC_AUDIT_MOUNT
50758+ bool "(Un)Mount logging"
50759+ help
50760+ If you say Y here, all mounts and unmounts will be logged. If the
50761+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50762+ created.
50763+
50764+config GRKERNSEC_SIGNAL
50765+ bool "Signal logging"
50766+ help
50767+ If you say Y here, certain important signals will be logged, such as
50768+ SIGSEGV, which will as a result inform you of when an error in a program
50769+ occurred, which in some cases could mean a possible exploit attempt.
50770+ If the sysctl option is enabled, a sysctl option with name
50771+ "signal_logging" is created.
50772+
50773+config GRKERNSEC_FORKFAIL
50774+ bool "Fork failure logging"
50775+ help
50776+ If you say Y here, all failed fork() attempts will be logged.
50777+ This could suggest a fork bomb, or someone attempting to overstep
50778+ their process limit. If the sysctl option is enabled, a sysctl option
50779+ with name "forkfail_logging" is created.
50780+
50781+config GRKERNSEC_TIME
50782+ bool "Time change logging"
50783+ help
50784+ If you say Y here, any changes of the system clock will be logged.
50785+ If the sysctl option is enabled, a sysctl option with name
50786+ "timechange_logging" is created.
50787+
50788+config GRKERNSEC_PROC_IPADDR
50789+ bool "/proc/<pid>/ipaddr support"
50790+ help
50791+ If you say Y here, a new entry will be added to each /proc/<pid>
50792+ directory that contains the IP address of the person using the task.
50793+ The IP is carried across local TCP and AF_UNIX stream sockets.
50794+ This information can be useful for IDS/IPSes to perform remote response
50795+ to a local attack. The entry is readable by only the owner of the
50796+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50797+ the RBAC system), and thus does not create privacy concerns.
50798+
50799+config GRKERNSEC_RWXMAP_LOG
50800+ bool 'Denied RWX mmap/mprotect logging'
50801+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50802+ help
50803+ If you say Y here, calls to mmap() and mprotect() with explicit
50804+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50805+ denied by the PAX_MPROTECT feature. If the sysctl option is
50806+ enabled, a sysctl option with name "rwxmap_logging" is created.
50807+
50808+config GRKERNSEC_AUDIT_TEXTREL
50809+ bool 'ELF text relocations logging (READ HELP)'
50810+ depends on PAX_MPROTECT
50811+ help
50812+ If you say Y here, text relocations will be logged with the filename
50813+ of the offending library or binary. The purpose of the feature is
50814+ to help Linux distribution developers get rid of libraries and
50815+ binaries that need text relocations which hinder the future progress
50816+ of PaX. Only Linux distribution developers should say Y here, and
50817+ never on a production machine, as this option creates an information
50818+ leak that could aid an attacker in defeating the randomization of
50819+ a single memory region. If the sysctl option is enabled, a sysctl
50820+ option with name "audit_textrel" is created.
50821+
50822+endmenu
50823+
50824+menu "Executable Protections"
50825+depends on GRKERNSEC
50826+
50827+config GRKERNSEC_EXECVE
50828+ bool "Enforce RLIMIT_NPROC on execs"
50829+ help
50830+ If you say Y here, users with a resource limit on processes will
50831+ have the value checked during execve() calls. The current system
50832+ only checks the system limit during fork() calls. If the sysctl option
50833+ is enabled, a sysctl option with name "execve_limiting" is created.
50834+
50835+config GRKERNSEC_DMESG
50836+ bool "Dmesg(8) restriction"
50837+ help
50838+ If you say Y here, non-root users will not be able to use dmesg(8)
50839+ to view up to the last 4kb of messages in the kernel's log buffer.
50840+ The kernel's log buffer often contains kernel addresses and other
50841+ identifying information useful to an attacker in fingerprinting a
50842+ system for a targeted exploit.
50843+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50844+ created.
50845+
50846+config GRKERNSEC_HARDEN_PTRACE
50847+ bool "Deter ptrace-based process snooping"
50848+ help
50849+ If you say Y here, TTY sniffers and other malicious monitoring
50850+ programs implemented through ptrace will be defeated. If you
50851+ have been using the RBAC system, this option has already been
50852+ enabled for several years for all users, with the ability to make
50853+ fine-grained exceptions.
50854+
50855+ This option only affects the ability of non-root users to ptrace
50856+ processes that are not a descendent of the ptracing process.
50857+ This means that strace ./binary and gdb ./binary will still work,
50858+ but attaching to arbitrary processes will not. If the sysctl
50859+ option is enabled, a sysctl option with name "harden_ptrace" is
50860+ created.
50861+
50862+config GRKERNSEC_TPE
50863+ bool "Trusted Path Execution (TPE)"
50864+ help
50865+ If you say Y here, you will be able to choose a gid to add to the
50866+ supplementary groups of users you want to mark as "untrusted."
50867+ These users will not be able to execute any files that are not in
50868+ root-owned directories writable only by root. If the sysctl option
50869+ is enabled, a sysctl option with name "tpe" is created.
50870+
50871+config GRKERNSEC_TPE_ALL
50872+ bool "Partially restrict all non-root users"
50873+ depends on GRKERNSEC_TPE
50874+ help
50875+ If you say Y here, all non-root users will be covered under
50876+ a weaker TPE restriction. This is separate from, and in addition to,
50877+ the main TPE options that you have selected elsewhere. Thus, if a
50878+ "trusted" GID is chosen, this restriction applies to even that GID.
50879+ Under this restriction, all non-root users will only be allowed to
50880+ execute files in directories they own that are not group or
50881+ world-writable, or in directories owned by root and writable only by
50882+ root. If the sysctl option is enabled, a sysctl option with name
50883+ "tpe_restrict_all" is created.
50884+
50885+config GRKERNSEC_TPE_INVERT
50886+ bool "Invert GID option"
50887+ depends on GRKERNSEC_TPE
50888+ help
50889+ If you say Y here, the group you specify in the TPE configuration will
50890+ decide what group TPE restrictions will be *disabled* for. This
50891+ option is useful if you want TPE restrictions to be applied to most
50892+ users on the system. If the sysctl option is enabled, a sysctl option
50893+ with name "tpe_invert" is created. Unlike other sysctl options, this
50894+ entry will default to on for backward-compatibility.
50895+
50896+config GRKERNSEC_TPE_GID
50897+ int "GID for untrusted users"
50898+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50899+ default 1005
50900+ help
50901+ Setting this GID determines what group TPE restrictions will be
50902+ *enabled* for. If the sysctl option is enabled, a sysctl option
50903+ with name "tpe_gid" is created.
50904+
50905+config GRKERNSEC_TPE_GID
50906+ int "GID for trusted users"
50907+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50908+ default 1005
50909+ help
50910+ Setting this GID determines what group TPE restrictions will be
50911+ *disabled* for. If the sysctl option is enabled, a sysctl option
50912+ with name "tpe_gid" is created.
50913+
50914+endmenu
50915+menu "Network Protections"
50916+depends on GRKERNSEC
50917+
50918+config GRKERNSEC_RANDNET
50919+ bool "Larger entropy pools"
50920+ help
50921+ If you say Y here, the entropy pools used for many features of Linux
50922+ and grsecurity will be doubled in size. Since several grsecurity
50923+ features use additional randomness, it is recommended that you say Y
50924+ here. Saying Y here has a similar effect as modifying
50925+ /proc/sys/kernel/random/poolsize.
50926+
50927+config GRKERNSEC_BLACKHOLE
50928+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50929+ help
50930+ If you say Y here, neither TCP resets nor ICMP
50931+ destination-unreachable packets will be sent in response to packets
50932+ sent to ports for which no associated listening process exists.
50933+ This feature supports both IPV4 and IPV6 and exempts the
50934+ loopback interface from blackholing. Enabling this feature
50935+ makes a host more resilient to DoS attacks and reduces network
50936+ visibility against scanners.
50937+
50938+ The blackhole feature as-implemented is equivalent to the FreeBSD
50939+ blackhole feature, as it prevents RST responses to all packets, not
50940+ just SYNs. Under most application behavior this causes no
50941+ problems, but applications (like haproxy) may not close certain
50942+ connections in a way that cleanly terminates them on the remote
50943+ end, leaving the remote host in LAST_ACK state. Because of this
50944+ side-effect and to prevent intentional LAST_ACK DoSes, this
50945+ feature also adds automatic mitigation against such attacks.
50946+ The mitigation drastically reduces the amount of time a socket
50947+ can spend in LAST_ACK state. If you're using haproxy and not
50948+ all servers it connects to have this option enabled, consider
50949+ disabling this feature on the haproxy host.
50950+
50951+ If the sysctl option is enabled, two sysctl options with names
50952+ "ip_blackhole" and "lastack_retries" will be created.
50953+ While "ip_blackhole" takes the standard zero/non-zero on/off
50954+ toggle, "lastack_retries" uses the same kinds of values as
50955+ "tcp_retries1" and "tcp_retries2". The default value of 4
50956+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50957+ state.
50958+
50959+config GRKERNSEC_SOCKET
50960+ bool "Socket restrictions"
50961+ help
50962+ If you say Y here, you will be able to choose from several options.
50963+ If you assign a GID on your system and add it to the supplementary
50964+ groups of users you want to restrict socket access to, this patch
50965+ will perform up to three things, based on the option(s) you choose.
50966+
50967+config GRKERNSEC_SOCKET_ALL
50968+ bool "Deny any sockets to group"
50969+ depends on GRKERNSEC_SOCKET
50970+ help
50971+ If you say Y here, you will be able to choose a GID whose users will
50972+ be unable to connect to other hosts from your machine or run server
50973+ applications from your machine. If the sysctl option is enabled, a
50974+ sysctl option with name "socket_all" is created.
50975+
50976+config GRKERNSEC_SOCKET_ALL_GID
50977+ int "GID to deny all sockets for"
50978+ depends on GRKERNSEC_SOCKET_ALL
50979+ default 1004
50980+ help
50981+ Here you can choose the GID to disable socket access for. Remember to
50982+ add the users you want socket access disabled for to the GID
50983+ specified here. If the sysctl option is enabled, a sysctl option
50984+ with name "socket_all_gid" is created.
50985+
50986+config GRKERNSEC_SOCKET_CLIENT
50987+ bool "Deny client sockets to group"
50988+ depends on GRKERNSEC_SOCKET
50989+ help
50990+ If you say Y here, you will be able to choose a GID whose users will
50991+ be unable to connect to other hosts from your machine, but will be
50992+ able to run servers. If this option is enabled, all users in the group
50993+ you specify will have to use passive mode when initiating ftp transfers
50994+ from the shell on your machine. If the sysctl option is enabled, a
50995+ sysctl option with name "socket_client" is created.
50996+
50997+config GRKERNSEC_SOCKET_CLIENT_GID
50998+ int "GID to deny client sockets for"
50999+ depends on GRKERNSEC_SOCKET_CLIENT
51000+ default 1003
51001+ help
51002+ Here you can choose the GID to disable client socket access for.
51003+ Remember to add the users you want client socket access disabled for to
51004+ the GID specified here. If the sysctl option is enabled, a sysctl
51005+ option with name "socket_client_gid" is created.
51006+
51007+config GRKERNSEC_SOCKET_SERVER
51008+ bool "Deny server sockets to group"
51009+ depends on GRKERNSEC_SOCKET
51010+ help
51011+ If you say Y here, you will be able to choose a GID whose users will
51012+ be unable to run server applications from your machine. If the sysctl
51013+ option is enabled, a sysctl option with name "socket_server" is created.
51014+
51015+config GRKERNSEC_SOCKET_SERVER_GID
51016+ int "GID to deny server sockets for"
51017+ depends on GRKERNSEC_SOCKET_SERVER
51018+ default 1002
51019+ help
51020+ Here you can choose the GID to disable server socket access for.
51021+ Remember to add the users you want server socket access disabled for to
51022+ the GID specified here. If the sysctl option is enabled, a sysctl
51023+ option with name "socket_server_gid" is created.
51024+
51025+endmenu
51026+menu "Sysctl support"
51027+depends on GRKERNSEC && SYSCTL
51028+
51029+config GRKERNSEC_SYSCTL
51030+ bool "Sysctl support"
51031+ help
51032+ If you say Y here, you will be able to change the options that
51033+ grsecurity runs with at bootup, without having to recompile your
51034+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51035+ to enable (1) or disable (0) various features. All the sysctl entries
51036+ are mutable until the "grsec_lock" entry is set to a non-zero value.
51037+ All features enabled in the kernel configuration are disabled at boot
51038+ if you do not say Y to the "Turn on features by default" option.
51039+ All options should be set at startup, and the grsec_lock entry should
51040+ be set to a non-zero value after all the options are set.
51041+ *THIS IS EXTREMELY IMPORTANT*
51042+
51043+config GRKERNSEC_SYSCTL_DISTRO
51044+ bool "Extra sysctl support for distro makers (READ HELP)"
51045+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51046+ help
51047+ If you say Y here, additional sysctl options will be created
51048+ for features that affect processes running as root. Therefore,
51049+ it is critical when using this option that the grsec_lock entry be
51050+ enabled after boot. Only distros with prebuilt kernel packages
51051+ with this option enabled that can ensure grsec_lock is enabled
51052+ after boot should use this option.
51053+ *Failure to set grsec_lock after boot makes all grsec features
51054+ this option covers useless*
51055+
51056+ Currently this option creates the following sysctl entries:
51057+ "Disable Privileged I/O": "disable_priv_io"
51058+
51059+config GRKERNSEC_SYSCTL_ON
51060+ bool "Turn on features by default"
51061+ depends on GRKERNSEC_SYSCTL
51062+ help
51063+ If you say Y here, instead of having all features enabled in the
51064+ kernel configuration disabled at boot time, the features will be
51065+ enabled at boot time. It is recommended you say Y here unless
51066+ there is some reason you would want all sysctl-tunable features to
51067+ be disabled by default. As mentioned elsewhere, it is important
51068+ to enable the grsec_lock entry once you have finished modifying
51069+ the sysctl entries.
51070+
51071+endmenu
51072+menu "Logging Options"
51073+depends on GRKERNSEC
51074+
51075+config GRKERNSEC_FLOODTIME
51076+ int "Seconds in between log messages (minimum)"
51077+ default 10
51078+ help
51079+ This option allows you to enforce the number of seconds between
51080+ grsecurity log messages. The default should be suitable for most
51081+ people, however, if you choose to change it, choose a value small enough
51082+ to allow informative logs to be produced, but large enough to
51083+ prevent flooding.
51084+
51085+config GRKERNSEC_FLOODBURST
51086+ int "Number of messages in a burst (maximum)"
51087+ default 4
51088+ help
51089+ This option allows you to choose the maximum number of messages allowed
51090+ within the flood time interval you chose in a separate option. The
51091+ default should be suitable for most people, however if you find that
51092+ many of your logs are being interpreted as flooding, you may want to
51093+ raise this value.
51094+
51095+endmenu
51096+
51097+endmenu
51098diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
51099--- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
51100+++ linux-2.6.39.4/grsecurity/Makefile 2011-08-05 19:44:37.000000000 -0400
51101@@ -0,0 +1,33 @@
51102+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51103+# during 2001-2009 it has been completely redesigned by Brad Spengler
51104+# into an RBAC system
51105+#
51106+# All code in this directory and various hooks inserted throughout the kernel
51107+# are copyright Brad Spengler - Open Source Security, Inc., and released
51108+# under the GPL v2 or higher
51109+
51110+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51111+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
51112+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51113+
51114+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51115+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51116+ gracl_learn.o grsec_log.o
51117+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51118+
51119+ifdef CONFIG_NET
51120+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51121+endif
51122+
51123+ifndef CONFIG_GRKERNSEC
51124+obj-y += grsec_disabled.o
51125+endif
51126+
51127+ifdef CONFIG_GRKERNSEC_HIDESYM
51128+extra-y := grsec_hidesym.o
51129+$(obj)/grsec_hidesym.o:
51130+ @-chmod -f 500 /boot
51131+ @-chmod -f 500 /lib/modules
51132+ @-chmod -f 700 .
51133+ @echo ' grsec: protected kernel image paths'
51134+endif
51135diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
51136--- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
51137+++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
51138@@ -107,7 +107,7 @@ struct acpi_device_ops {
51139 acpi_op_bind bind;
51140 acpi_op_unbind unbind;
51141 acpi_op_notify notify;
51142-};
51143+} __no_const;
51144
51145 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
51146
51147diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51148--- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51149+++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51150@@ -22,6 +22,12 @@
51151
51152 typedef atomic64_t atomic_long_t;
51153
51154+#ifdef CONFIG_PAX_REFCOUNT
51155+typedef atomic64_unchecked_t atomic_long_unchecked_t;
51156+#else
51157+typedef atomic64_t atomic_long_unchecked_t;
51158+#endif
51159+
51160 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51161
51162 static inline long atomic_long_read(atomic_long_t *l)
51163@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51164 return (long)atomic64_read(v);
51165 }
51166
51167+#ifdef CONFIG_PAX_REFCOUNT
51168+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51169+{
51170+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51171+
51172+ return (long)atomic64_read_unchecked(v);
51173+}
51174+#endif
51175+
51176 static inline void atomic_long_set(atomic_long_t *l, long i)
51177 {
51178 atomic64_t *v = (atomic64_t *)l;
51179@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51180 atomic64_set(v, i);
51181 }
51182
51183+#ifdef CONFIG_PAX_REFCOUNT
51184+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51185+{
51186+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51187+
51188+ atomic64_set_unchecked(v, i);
51189+}
51190+#endif
51191+
51192 static inline void atomic_long_inc(atomic_long_t *l)
51193 {
51194 atomic64_t *v = (atomic64_t *)l;
51195@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51196 atomic64_inc(v);
51197 }
51198
51199+#ifdef CONFIG_PAX_REFCOUNT
51200+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51201+{
51202+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51203+
51204+ atomic64_inc_unchecked(v);
51205+}
51206+#endif
51207+
51208 static inline void atomic_long_dec(atomic_long_t *l)
51209 {
51210 atomic64_t *v = (atomic64_t *)l;
51211@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51212 atomic64_dec(v);
51213 }
51214
51215+#ifdef CONFIG_PAX_REFCOUNT
51216+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51217+{
51218+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51219+
51220+ atomic64_dec_unchecked(v);
51221+}
51222+#endif
51223+
51224 static inline void atomic_long_add(long i, atomic_long_t *l)
51225 {
51226 atomic64_t *v = (atomic64_t *)l;
51227@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51228 atomic64_add(i, v);
51229 }
51230
51231+#ifdef CONFIG_PAX_REFCOUNT
51232+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51233+{
51234+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51235+
51236+ atomic64_add_unchecked(i, v);
51237+}
51238+#endif
51239+
51240 static inline void atomic_long_sub(long i, atomic_long_t *l)
51241 {
51242 atomic64_t *v = (atomic64_t *)l;
51243@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51244 atomic64_sub(i, v);
51245 }
51246
51247+#ifdef CONFIG_PAX_REFCOUNT
51248+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51249+{
51250+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51251+
51252+ atomic64_sub_unchecked(i, v);
51253+}
51254+#endif
51255+
51256 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51257 {
51258 atomic64_t *v = (atomic64_t *)l;
51259@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51260 return (long)atomic64_inc_return(v);
51261 }
51262
51263+#ifdef CONFIG_PAX_REFCOUNT
51264+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51265+{
51266+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51267+
51268+ return (long)atomic64_inc_return_unchecked(v);
51269+}
51270+#endif
51271+
51272 static inline long atomic_long_dec_return(atomic_long_t *l)
51273 {
51274 atomic64_t *v = (atomic64_t *)l;
51275@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51276
51277 typedef atomic_t atomic_long_t;
51278
51279+#ifdef CONFIG_PAX_REFCOUNT
51280+typedef atomic_unchecked_t atomic_long_unchecked_t;
51281+#else
51282+typedef atomic_t atomic_long_unchecked_t;
51283+#endif
51284+
51285 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51286 static inline long atomic_long_read(atomic_long_t *l)
51287 {
51288@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51289 return (long)atomic_read(v);
51290 }
51291
51292+#ifdef CONFIG_PAX_REFCOUNT
51293+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51294+{
51295+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51296+
51297+ return (long)atomic_read_unchecked(v);
51298+}
51299+#endif
51300+
51301 static inline void atomic_long_set(atomic_long_t *l, long i)
51302 {
51303 atomic_t *v = (atomic_t *)l;
51304@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51305 atomic_set(v, i);
51306 }
51307
51308+#ifdef CONFIG_PAX_REFCOUNT
51309+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51310+{
51311+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51312+
51313+ atomic_set_unchecked(v, i);
51314+}
51315+#endif
51316+
51317 static inline void atomic_long_inc(atomic_long_t *l)
51318 {
51319 atomic_t *v = (atomic_t *)l;
51320@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51321 atomic_inc(v);
51322 }
51323
51324+#ifdef CONFIG_PAX_REFCOUNT
51325+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51326+{
51327+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51328+
51329+ atomic_inc_unchecked(v);
51330+}
51331+#endif
51332+
51333 static inline void atomic_long_dec(atomic_long_t *l)
51334 {
51335 atomic_t *v = (atomic_t *)l;
51336@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51337 atomic_dec(v);
51338 }
51339
51340+#ifdef CONFIG_PAX_REFCOUNT
51341+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51342+{
51343+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51344+
51345+ atomic_dec_unchecked(v);
51346+}
51347+#endif
51348+
51349 static inline void atomic_long_add(long i, atomic_long_t *l)
51350 {
51351 atomic_t *v = (atomic_t *)l;
51352@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51353 atomic_add(i, v);
51354 }
51355
51356+#ifdef CONFIG_PAX_REFCOUNT
51357+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51358+{
51359+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51360+
51361+ atomic_add_unchecked(i, v);
51362+}
51363+#endif
51364+
51365 static inline void atomic_long_sub(long i, atomic_long_t *l)
51366 {
51367 atomic_t *v = (atomic_t *)l;
51368@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51369 atomic_sub(i, v);
51370 }
51371
51372+#ifdef CONFIG_PAX_REFCOUNT
51373+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51374+{
51375+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51376+
51377+ atomic_sub_unchecked(i, v);
51378+}
51379+#endif
51380+
51381 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51382 {
51383 atomic_t *v = (atomic_t *)l;
51384@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51385 return (long)atomic_inc_return(v);
51386 }
51387
51388+#ifdef CONFIG_PAX_REFCOUNT
51389+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51390+{
51391+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51392+
51393+ return (long)atomic_inc_return_unchecked(v);
51394+}
51395+#endif
51396+
51397 static inline long atomic_long_dec_return(atomic_long_t *l)
51398 {
51399 atomic_t *v = (atomic_t *)l;
51400@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51401
51402 #endif /* BITS_PER_LONG == 64 */
51403
51404+#ifdef CONFIG_PAX_REFCOUNT
51405+static inline void pax_refcount_needs_these_functions(void)
51406+{
51407+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
51408+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51409+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51410+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51411+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51412+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51413+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51414+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51415+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51416+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51417+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51418+
51419+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51420+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51421+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51422+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51423+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51424+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51425+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51426+}
51427+#else
51428+#define atomic_read_unchecked(v) atomic_read(v)
51429+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51430+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51431+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51432+#define atomic_inc_unchecked(v) atomic_inc(v)
51433+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51434+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51435+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51436+#define atomic_dec_unchecked(v) atomic_dec(v)
51437+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51438+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51439+
51440+#define atomic_long_read_unchecked(v) atomic_long_read(v)
51441+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51442+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51443+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51444+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51445+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51446+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51447+#endif
51448+
51449 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51450diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51451--- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51452+++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51453@@ -6,7 +6,7 @@
51454 * cache lines need to provide their own cache.h.
51455 */
51456
51457-#define L1_CACHE_SHIFT 5
51458-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51459+#define L1_CACHE_SHIFT 5UL
51460+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51461
51462 #endif /* __ASM_GENERIC_CACHE_H */
51463diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51464--- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51465+++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51466@@ -46,6 +46,8 @@ typedef unsigned int u32;
51467 typedef signed long s64;
51468 typedef unsigned long u64;
51469
51470+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51471+
51472 #define S8_C(x) x
51473 #define U8_C(x) x ## U
51474 #define S16_C(x) x
51475diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51476--- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51477+++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51478@@ -51,6 +51,8 @@ typedef unsigned int u32;
51479 typedef signed long long s64;
51480 typedef unsigned long long u64;
51481
51482+typedef unsigned long long intoverflow_t;
51483+
51484 #define S8_C(x) x
51485 #define U8_C(x) x ## U
51486 #define S16_C(x) x
51487diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51488--- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51489+++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51490@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51491 KMAP_D(17) KM_NMI,
51492 KMAP_D(18) KM_NMI_PTE,
51493 KMAP_D(19) KM_KDB,
51494+KMAP_D(20) KM_CLEARPAGE,
51495 /*
51496 * Remember to update debug_kmap_atomic() when adding new kmap types!
51497 */
51498-KMAP_D(20) KM_TYPE_NR
51499+KMAP_D(21) KM_TYPE_NR
51500 };
51501
51502 #undef KMAP_D
51503diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51504--- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51505+++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51506@@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51507 #endif /* __HAVE_ARCH_PMD_WRITE */
51508 #endif
51509
51510+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51511+static inline unsigned long pax_open_kernel(void) { return 0; }
51512+#endif
51513+
51514+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51515+static inline unsigned long pax_close_kernel(void) { return 0; }
51516+#endif
51517+
51518 #endif /* !__ASSEMBLY__ */
51519
51520 #endif /* _ASM_GENERIC_PGTABLE_H */
51521diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51522--- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51523+++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51524@@ -1,14 +1,19 @@
51525 #ifndef _PGTABLE_NOPMD_H
51526 #define _PGTABLE_NOPMD_H
51527
51528-#ifndef __ASSEMBLY__
51529-
51530 #include <asm-generic/pgtable-nopud.h>
51531
51532-struct mm_struct;
51533-
51534 #define __PAGETABLE_PMD_FOLDED
51535
51536+#define PMD_SHIFT PUD_SHIFT
51537+#define PTRS_PER_PMD 1
51538+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51539+#define PMD_MASK (~(PMD_SIZE-1))
51540+
51541+#ifndef __ASSEMBLY__
51542+
51543+struct mm_struct;
51544+
51545 /*
51546 * Having the pmd type consist of a pud gets the size right, and allows
51547 * us to conceptually access the pud entry that this pmd is folded into
51548@@ -16,11 +21,6 @@ struct mm_struct;
51549 */
51550 typedef struct { pud_t pud; } pmd_t;
51551
51552-#define PMD_SHIFT PUD_SHIFT
51553-#define PTRS_PER_PMD 1
51554-#define PMD_SIZE (1UL << PMD_SHIFT)
51555-#define PMD_MASK (~(PMD_SIZE-1))
51556-
51557 /*
51558 * The "pud_xxx()" functions here are trivial for a folded two-level
51559 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51560diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51561--- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51562+++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51563@@ -1,10 +1,15 @@
51564 #ifndef _PGTABLE_NOPUD_H
51565 #define _PGTABLE_NOPUD_H
51566
51567-#ifndef __ASSEMBLY__
51568-
51569 #define __PAGETABLE_PUD_FOLDED
51570
51571+#define PUD_SHIFT PGDIR_SHIFT
51572+#define PTRS_PER_PUD 1
51573+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51574+#define PUD_MASK (~(PUD_SIZE-1))
51575+
51576+#ifndef __ASSEMBLY__
51577+
51578 /*
51579 * Having the pud type consist of a pgd gets the size right, and allows
51580 * us to conceptually access the pgd entry that this pud is folded into
51581@@ -12,11 +17,6 @@
51582 */
51583 typedef struct { pgd_t pgd; } pud_t;
51584
51585-#define PUD_SHIFT PGDIR_SHIFT
51586-#define PTRS_PER_PUD 1
51587-#define PUD_SIZE (1UL << PUD_SHIFT)
51588-#define PUD_MASK (~(PUD_SIZE-1))
51589-
51590 /*
51591 * The "pgd_xxx()" functions here are trivial for a folded two-level
51592 * setup: the pud is never bad, and a pud always exists (as it's folded
51593diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51594--- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51595+++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51596@@ -213,6 +213,7 @@
51597 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51598 VMLINUX_SYMBOL(__start_rodata) = .; \
51599 *(.rodata) *(.rodata.*) \
51600+ *(.data..read_only) \
51601 *(__vermagic) /* Kernel version magic */ \
51602 . = ALIGN(8); \
51603 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51604@@ -707,14 +708,15 @@
51605 * section in the linker script will go there too. @phdr should have
51606 * a leading colon.
51607 *
51608- * Note that this macros defines __per_cpu_load as an absolute symbol.
51609+ * Note that this macros defines per_cpu_load as an absolute symbol.
51610 * If there is no need to put the percpu section at a predetermined
51611 * address, use PERCPU().
51612 */
51613 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51614- VMLINUX_SYMBOL(__per_cpu_load) = .; \
51615- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51616+ per_cpu_load = .; \
51617+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51618 - LOAD_OFFSET) { \
51619+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51620 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51621 *(.data..percpu..first) \
51622 . = ALIGN(PAGE_SIZE); \
51623@@ -726,7 +728,7 @@
51624 *(.data..percpu..shared_aligned) \
51625 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51626 } phdr \
51627- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51628+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51629
51630 /**
51631 * PERCPU - define output section for percpu area, simple version
51632diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51633--- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51634+++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51635@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51636
51637 /* disable crtc when not in use - more explicit than dpms off */
51638 void (*disable)(struct drm_crtc *crtc);
51639-};
51640+} __no_const;
51641
51642 struct drm_encoder_helper_funcs {
51643 void (*dpms)(struct drm_encoder *encoder, int mode);
51644@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51645 struct drm_connector *connector);
51646 /* disable encoder when not in use - more explicit than dpms off */
51647 void (*disable)(struct drm_encoder *encoder);
51648-};
51649+} __no_const;
51650
51651 struct drm_connector_helper_funcs {
51652 int (*get_modes)(struct drm_connector *connector);
51653diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51654--- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51655+++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51656@@ -73,6 +73,7 @@
51657 #include <linux/workqueue.h>
51658 #include <linux/poll.h>
51659 #include <asm/pgalloc.h>
51660+#include <asm/local.h>
51661 #include "drm.h"
51662
51663 #include <linux/idr.h>
51664@@ -1023,7 +1024,7 @@ struct drm_device {
51665
51666 /** \name Usage Counters */
51667 /*@{ */
51668- int open_count; /**< Outstanding files open */
51669+ local_t open_count; /**< Outstanding files open */
51670 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51671 atomic_t vma_count; /**< Outstanding vma areas open */
51672 int buf_use; /**< Buffers in use -- cannot alloc */
51673@@ -1034,7 +1035,7 @@ struct drm_device {
51674 /*@{ */
51675 unsigned long counters;
51676 enum drm_stat_type types[15];
51677- atomic_t counts[15];
51678+ atomic_unchecked_t counts[15];
51679 /*@} */
51680
51681 struct list_head filelist;
51682diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51683--- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51684+++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51685@@ -47,7 +47,7 @@
51686
51687 struct ttm_mem_shrink {
51688 int (*do_shrink) (struct ttm_mem_shrink *);
51689-};
51690+} __no_const;
51691
51692 /**
51693 * struct ttm_mem_global - Global memory accounting structure.
51694diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51695--- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51696+++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51697@@ -39,6 +39,14 @@ enum machine_type {
51698 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51699 };
51700
51701+/* Constants for the N_FLAGS field */
51702+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51703+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51704+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51705+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51706+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51707+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51708+
51709 #if !defined (N_MAGIC)
51710 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51711 #endif
51712diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51713--- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51714+++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51715@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51716 #endif
51717
51718 struct k_atm_aal_stats {
51719-#define __HANDLE_ITEM(i) atomic_t i
51720+#define __HANDLE_ITEM(i) atomic_unchecked_t i
51721 __AAL_STAT_ITEMS
51722 #undef __HANDLE_ITEM
51723 };
51724diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51725--- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51726+++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51727@@ -92,6 +92,7 @@ struct linux_binfmt {
51728 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51729 int (*load_shlib)(struct file *);
51730 int (*core_dump)(struct coredump_params *cprm);
51731+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51732 unsigned long min_coredump; /* minimal dump size */
51733 };
51734
51735diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51736--- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51737+++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51738@@ -1307,7 +1307,7 @@ struct block_device_operations {
51739 int (*getgeo)(struct block_device *, struct hd_geometry *);
51740 /* this callback is with swap_lock and sometimes page table lock held */
51741 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51742- struct module *owner;
51743+ struct module * const owner;
51744 };
51745
51746 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51747diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51748--- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51749+++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51750@@ -161,7 +161,7 @@ struct blk_trace {
51751 struct dentry *dir;
51752 struct dentry *dropped_file;
51753 struct dentry *msg_file;
51754- atomic_t dropped;
51755+ atomic_unchecked_t dropped;
51756 };
51757
51758 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51759diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51760--- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51761+++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51762@@ -42,51 +42,51 @@
51763
51764 static inline __le64 __cpu_to_le64p(const __u64 *p)
51765 {
51766- return (__force __le64)*p;
51767+ return (__force const __le64)*p;
51768 }
51769 static inline __u64 __le64_to_cpup(const __le64 *p)
51770 {
51771- return (__force __u64)*p;
51772+ return (__force const __u64)*p;
51773 }
51774 static inline __le32 __cpu_to_le32p(const __u32 *p)
51775 {
51776- return (__force __le32)*p;
51777+ return (__force const __le32)*p;
51778 }
51779 static inline __u32 __le32_to_cpup(const __le32 *p)
51780 {
51781- return (__force __u32)*p;
51782+ return (__force const __u32)*p;
51783 }
51784 static inline __le16 __cpu_to_le16p(const __u16 *p)
51785 {
51786- return (__force __le16)*p;
51787+ return (__force const __le16)*p;
51788 }
51789 static inline __u16 __le16_to_cpup(const __le16 *p)
51790 {
51791- return (__force __u16)*p;
51792+ return (__force const __u16)*p;
51793 }
51794 static inline __be64 __cpu_to_be64p(const __u64 *p)
51795 {
51796- return (__force __be64)__swab64p(p);
51797+ return (__force const __be64)__swab64p(p);
51798 }
51799 static inline __u64 __be64_to_cpup(const __be64 *p)
51800 {
51801- return __swab64p((__u64 *)p);
51802+ return __swab64p((const __u64 *)p);
51803 }
51804 static inline __be32 __cpu_to_be32p(const __u32 *p)
51805 {
51806- return (__force __be32)__swab32p(p);
51807+ return (__force const __be32)__swab32p(p);
51808 }
51809 static inline __u32 __be32_to_cpup(const __be32 *p)
51810 {
51811- return __swab32p((__u32 *)p);
51812+ return __swab32p((const __u32 *)p);
51813 }
51814 static inline __be16 __cpu_to_be16p(const __u16 *p)
51815 {
51816- return (__force __be16)__swab16p(p);
51817+ return (__force const __be16)__swab16p(p);
51818 }
51819 static inline __u16 __be16_to_cpup(const __be16 *p)
51820 {
51821- return __swab16p((__u16 *)p);
51822+ return __swab16p((const __u16 *)p);
51823 }
51824 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51825 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51826diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51827--- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51828+++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51829@@ -16,6 +16,10 @@
51830 #define __read_mostly
51831 #endif
51832
51833+#ifndef __read_only
51834+#define __read_only __read_mostly
51835+#endif
51836+
51837 #ifndef ____cacheline_aligned
51838 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51839 #endif
51840diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51841--- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51842+++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51843@@ -547,6 +547,9 @@ extern bool capable(int cap);
51844 extern bool ns_capable(struct user_namespace *ns, int cap);
51845 extern bool task_ns_capable(struct task_struct *t, int cap);
51846 extern bool nsown_capable(int cap);
51847+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51848+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51849+extern bool capable_nolog(int cap);
51850
51851 /* audit system wants to get cap info from files as well */
51852 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51853diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51854--- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51855+++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51856@@ -31,6 +31,9 @@
51857
51858
51859 #if __GNUC_MINOR__ >= 5
51860+
51861+#define __no_const __attribute__((no_const))
51862+
51863 /*
51864 * Mark a position in code as unreachable. This can be used to
51865 * suppress control flow warnings after asm blocks that transfer
51866@@ -46,6 +49,11 @@
51867 #define __noclone __attribute__((__noclone__))
51868
51869 #endif
51870+
51871+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51872+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51873+#define __bos0(ptr) __bos((ptr), 0)
51874+#define __bos1(ptr) __bos((ptr), 1)
51875 #endif
51876
51877 #if __GNUC_MINOR__ > 0
51878diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51879--- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51880+++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51881@@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51882 # define __attribute_const__ /* unimplemented */
51883 #endif
51884
51885+#ifndef __no_const
51886+# define __no_const
51887+#endif
51888+
51889 /*
51890 * Tell gcc if a function is cold. The compiler will assume any path
51891 * directly leading to the call is unlikely.
51892@@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51893 #define __cold
51894 #endif
51895
51896+#ifndef __alloc_size
51897+#define __alloc_size(...)
51898+#endif
51899+
51900+#ifndef __bos
51901+#define __bos(ptr, arg)
51902+#endif
51903+
51904+#ifndef __bos0
51905+#define __bos0(ptr)
51906+#endif
51907+
51908+#ifndef __bos1
51909+#define __bos1(ptr)
51910+#endif
51911+
51912 /* Simple shorthand for a section definition */
51913 #ifndef __section
51914 # define __section(S) __attribute__ ((__section__(#S)))
51915@@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51916 * use is to mediate communication between process-level code and irq/NMI
51917 * handlers, all running on the same CPU.
51918 */
51919-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51920+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51921+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51922
51923 #endif /* __LINUX_COMPILER_H */
51924diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51925--- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51926+++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51927@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51928 * nodemask.
51929 */
51930 smp_mb();
51931- --ACCESS_ONCE(current->mems_allowed_change_disable);
51932+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51933 }
51934
51935 static inline void set_mems_allowed(nodemask_t nodemask)
51936diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51937--- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51938+++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51939@@ -361,7 +361,7 @@ struct cipher_tfm {
51940 const u8 *key, unsigned int keylen);
51941 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51942 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51943-};
51944+} __no_const;
51945
51946 struct hash_tfm {
51947 int (*init)(struct hash_desc *desc);
51948@@ -382,13 +382,13 @@ struct compress_tfm {
51949 int (*cot_decompress)(struct crypto_tfm *tfm,
51950 const u8 *src, unsigned int slen,
51951 u8 *dst, unsigned int *dlen);
51952-};
51953+} __no_const;
51954
51955 struct rng_tfm {
51956 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51957 unsigned int dlen);
51958 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51959-};
51960+} __no_const;
51961
51962 #define crt_ablkcipher crt_u.ablkcipher
51963 #define crt_aead crt_u.aead
51964diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51965--- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51966+++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51967@@ -77,7 +77,7 @@ static void free(void *where)
51968 * warnings when not needed (indeed large_malloc / large_free are not
51969 * needed by inflate */
51970
51971-#define malloc(a) kmalloc(a, GFP_KERNEL)
51972+#define malloc(a) kmalloc((a), GFP_KERNEL)
51973 #define free(a) kfree(a)
51974
51975 #define large_malloc(a) vmalloc(a)
51976diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
51977--- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
51978+++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
51979@@ -49,7 +49,7 @@ struct dma_map_ops {
51980 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51981 int (*dma_supported)(struct device *dev, u64 mask);
51982 int (*set_dma_mask)(struct device *dev, u64 mask);
51983- int is_phys;
51984+ const int is_phys;
51985 };
51986
51987 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51988diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
51989--- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
51990+++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
51991@@ -409,7 +409,7 @@ struct efivar_operations {
51992 efi_get_variable_t *get_variable;
51993 efi_get_next_variable_t *get_next_variable;
51994 efi_set_variable_t *set_variable;
51995-};
51996+} __no_const;
51997
51998 struct efivars {
51999 /*
52000diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
52001--- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
52002+++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
52003@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
52004 #define PT_GNU_EH_FRAME 0x6474e550
52005
52006 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
52007+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
52008+
52009+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
52010+
52011+/* Constants for the e_flags field */
52012+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
52013+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
52014+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
52015+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
52016+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
52017+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
52018
52019 /*
52020 * Extended Numbering
52021@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
52022 #define DT_DEBUG 21
52023 #define DT_TEXTREL 22
52024 #define DT_JMPREL 23
52025+#define DT_FLAGS 30
52026+ #define DF_TEXTREL 0x00000004
52027 #define DT_ENCODING 32
52028 #define OLD_DT_LOOS 0x60000000
52029 #define DT_LOOS 0x6000000d
52030@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
52031 #define PF_W 0x2
52032 #define PF_X 0x1
52033
52034+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
52035+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
52036+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
52037+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
52038+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
52039+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
52040+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
52041+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
52042+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
52043+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
52044+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
52045+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
52046+
52047 typedef struct elf32_phdr{
52048 Elf32_Word p_type;
52049 Elf32_Off p_offset;
52050@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
52051 #define EI_OSABI 7
52052 #define EI_PAD 8
52053
52054+#define EI_PAX 14
52055+
52056 #define ELFMAG0 0x7f /* EI_MAG */
52057 #define ELFMAG1 'E'
52058 #define ELFMAG2 'L'
52059@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
52060 #define elf_note elf32_note
52061 #define elf_addr_t Elf32_Off
52062 #define Elf_Half Elf32_Half
52063+#define elf_dyn Elf32_Dyn
52064
52065 #else
52066
52067@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
52068 #define elf_note elf64_note
52069 #define elf_addr_t Elf64_Off
52070 #define Elf_Half Elf64_Half
52071+#define elf_dyn Elf64_Dyn
52072
52073 #endif
52074
52075diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
52076--- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
52077+++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
52078@@ -429,7 +429,7 @@ struct fw_iso_context {
52079 union {
52080 fw_iso_callback_t sc;
52081 fw_iso_mc_callback_t mc;
52082- } callback;
52083+ } __no_const callback;
52084 void *callback_data;
52085 };
52086
52087diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
52088--- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
52089+++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
52090@@ -113,7 +113,7 @@ struct fscache_operation {
52091 #endif
52092 };
52093
52094-extern atomic_t fscache_op_debug_id;
52095+extern atomic_unchecked_t fscache_op_debug_id;
52096 extern void fscache_op_work_func(struct work_struct *work);
52097
52098 extern void fscache_enqueue_operation(struct fscache_operation *);
52099@@ -133,7 +133,7 @@ static inline void fscache_operation_ini
52100 {
52101 INIT_WORK(&op->work, fscache_op_work_func);
52102 atomic_set(&op->usage, 1);
52103- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
52104+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52105 op->processor = processor;
52106 op->release = release;
52107 INIT_LIST_HEAD(&op->pend_link);
52108diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
52109--- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
52110+++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
52111@@ -108,6 +108,11 @@ struct inodes_stat_t {
52112 /* File was opened by fanotify and shouldn't generate fanotify events */
52113 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
52114
52115+/* Hack for grsec so as not to require read permission simply to execute
52116+ * a binary
52117+ */
52118+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
52119+
52120 /*
52121 * The below are the various read and write types that we support. Some of
52122 * them include behavioral modifiers that send information down to the
52123@@ -1535,7 +1540,7 @@ struct block_device_operations;
52124 * the big kernel lock held in all filesystems.
52125 */
52126 struct file_operations {
52127- struct module *owner;
52128+ struct module * const owner;
52129 loff_t (*llseek) (struct file *, loff_t, int);
52130 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
52131 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
52132@@ -1563,6 +1568,7 @@ struct file_operations {
52133 long (*fallocate)(struct file *file, int mode, loff_t offset,
52134 loff_t len);
52135 };
52136+typedef struct file_operations __no_const file_operations_no_const;
52137
52138 #define IPERM_FLAG_RCU 0x0001
52139
52140diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
52141--- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
52142+++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
52143@@ -6,7 +6,7 @@
52144 #include <linux/seqlock.h>
52145
52146 struct fs_struct {
52147- int users;
52148+ atomic_t users;
52149 spinlock_t lock;
52150 seqcount_t seq;
52151 int umask;
52152diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52153--- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52154+++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52155@@ -84,7 +84,7 @@ struct trace_event_functions {
52156 trace_print_func raw;
52157 trace_print_func hex;
52158 trace_print_func binary;
52159-};
52160+} __no_const;
52161
52162 struct trace_event {
52163 struct hlist_node node;
52164@@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52165 extern int trace_add_event_call(struct ftrace_event_call *call);
52166 extern void trace_remove_event_call(struct ftrace_event_call *call);
52167
52168-#define is_signed_type(type) (((type)(-1)) < 0)
52169+#define is_signed_type(type) (((type)(-1)) < (type)1)
52170
52171 int trace_set_clr_event(const char *system, const char *event, int set);
52172
52173diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52174--- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52175+++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52176@@ -184,7 +184,7 @@ struct gendisk {
52177 struct kobject *slave_dir;
52178
52179 struct timer_rand_state *random;
52180- atomic_t sync_io; /* RAID */
52181+ atomic_unchecked_t sync_io; /* RAID */
52182 struct disk_events *ev;
52183 #ifdef CONFIG_BLK_DEV_INTEGRITY
52184 struct blk_integrity *integrity;
52185diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52186--- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52187+++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52188@@ -0,0 +1,317 @@
52189+#ifndef GR_ACL_H
52190+#define GR_ACL_H
52191+
52192+#include <linux/grdefs.h>
52193+#include <linux/resource.h>
52194+#include <linux/capability.h>
52195+#include <linux/dcache.h>
52196+#include <asm/resource.h>
52197+
52198+/* Major status information */
52199+
52200+#define GR_VERSION "grsecurity 2.2.2"
52201+#define GRSECURITY_VERSION 0x2202
52202+
52203+enum {
52204+ GR_SHUTDOWN = 0,
52205+ GR_ENABLE = 1,
52206+ GR_SPROLE = 2,
52207+ GR_RELOAD = 3,
52208+ GR_SEGVMOD = 4,
52209+ GR_STATUS = 5,
52210+ GR_UNSPROLE = 6,
52211+ GR_PASSSET = 7,
52212+ GR_SPROLEPAM = 8,
52213+};
52214+
52215+/* Password setup definitions
52216+ * kernel/grhash.c */
52217+enum {
52218+ GR_PW_LEN = 128,
52219+ GR_SALT_LEN = 16,
52220+ GR_SHA_LEN = 32,
52221+};
52222+
52223+enum {
52224+ GR_SPROLE_LEN = 64,
52225+};
52226+
52227+enum {
52228+ GR_NO_GLOB = 0,
52229+ GR_REG_GLOB,
52230+ GR_CREATE_GLOB
52231+};
52232+
52233+#define GR_NLIMITS 32
52234+
52235+/* Begin Data Structures */
52236+
52237+struct sprole_pw {
52238+ unsigned char *rolename;
52239+ unsigned char salt[GR_SALT_LEN];
52240+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52241+};
52242+
52243+struct name_entry {
52244+ __u32 key;
52245+ ino_t inode;
52246+ dev_t device;
52247+ char *name;
52248+ __u16 len;
52249+ __u8 deleted;
52250+ struct name_entry *prev;
52251+ struct name_entry *next;
52252+};
52253+
52254+struct inodev_entry {
52255+ struct name_entry *nentry;
52256+ struct inodev_entry *prev;
52257+ struct inodev_entry *next;
52258+};
52259+
52260+struct acl_role_db {
52261+ struct acl_role_label **r_hash;
52262+ __u32 r_size;
52263+};
52264+
52265+struct inodev_db {
52266+ struct inodev_entry **i_hash;
52267+ __u32 i_size;
52268+};
52269+
52270+struct name_db {
52271+ struct name_entry **n_hash;
52272+ __u32 n_size;
52273+};
52274+
52275+struct crash_uid {
52276+ uid_t uid;
52277+ unsigned long expires;
52278+};
52279+
52280+struct gr_hash_struct {
52281+ void **table;
52282+ void **nametable;
52283+ void *first;
52284+ __u32 table_size;
52285+ __u32 used_size;
52286+ int type;
52287+};
52288+
52289+/* Userspace Grsecurity ACL data structures */
52290+
52291+struct acl_subject_label {
52292+ char *filename;
52293+ ino_t inode;
52294+ dev_t device;
52295+ __u32 mode;
52296+ kernel_cap_t cap_mask;
52297+ kernel_cap_t cap_lower;
52298+ kernel_cap_t cap_invert_audit;
52299+
52300+ struct rlimit res[GR_NLIMITS];
52301+ __u32 resmask;
52302+
52303+ __u8 user_trans_type;
52304+ __u8 group_trans_type;
52305+ uid_t *user_transitions;
52306+ gid_t *group_transitions;
52307+ __u16 user_trans_num;
52308+ __u16 group_trans_num;
52309+
52310+ __u32 sock_families[2];
52311+ __u32 ip_proto[8];
52312+ __u32 ip_type;
52313+ struct acl_ip_label **ips;
52314+ __u32 ip_num;
52315+ __u32 inaddr_any_override;
52316+
52317+ __u32 crashes;
52318+ unsigned long expires;
52319+
52320+ struct acl_subject_label *parent_subject;
52321+ struct gr_hash_struct *hash;
52322+ struct acl_subject_label *prev;
52323+ struct acl_subject_label *next;
52324+
52325+ struct acl_object_label **obj_hash;
52326+ __u32 obj_hash_size;
52327+ __u16 pax_flags;
52328+};
52329+
52330+struct role_allowed_ip {
52331+ __u32 addr;
52332+ __u32 netmask;
52333+
52334+ struct role_allowed_ip *prev;
52335+ struct role_allowed_ip *next;
52336+};
52337+
52338+struct role_transition {
52339+ char *rolename;
52340+
52341+ struct role_transition *prev;
52342+ struct role_transition *next;
52343+};
52344+
52345+struct acl_role_label {
52346+ char *rolename;
52347+ uid_t uidgid;
52348+ __u16 roletype;
52349+
52350+ __u16 auth_attempts;
52351+ unsigned long expires;
52352+
52353+ struct acl_subject_label *root_label;
52354+ struct gr_hash_struct *hash;
52355+
52356+ struct acl_role_label *prev;
52357+ struct acl_role_label *next;
52358+
52359+ struct role_transition *transitions;
52360+ struct role_allowed_ip *allowed_ips;
52361+ uid_t *domain_children;
52362+ __u16 domain_child_num;
52363+
52364+ struct acl_subject_label **subj_hash;
52365+ __u32 subj_hash_size;
52366+};
52367+
52368+struct user_acl_role_db {
52369+ struct acl_role_label **r_table;
52370+ __u32 num_pointers; /* Number of allocations to track */
52371+ __u32 num_roles; /* Number of roles */
52372+ __u32 num_domain_children; /* Number of domain children */
52373+ __u32 num_subjects; /* Number of subjects */
52374+ __u32 num_objects; /* Number of objects */
52375+};
52376+
52377+struct acl_object_label {
52378+ char *filename;
52379+ ino_t inode;
52380+ dev_t device;
52381+ __u32 mode;
52382+
52383+ struct acl_subject_label *nested;
52384+ struct acl_object_label *globbed;
52385+
52386+ /* next two structures not used */
52387+
52388+ struct acl_object_label *prev;
52389+ struct acl_object_label *next;
52390+};
52391+
52392+struct acl_ip_label {
52393+ char *iface;
52394+ __u32 addr;
52395+ __u32 netmask;
52396+ __u16 low, high;
52397+ __u8 mode;
52398+ __u32 type;
52399+ __u32 proto[8];
52400+
52401+ /* next two structures not used */
52402+
52403+ struct acl_ip_label *prev;
52404+ struct acl_ip_label *next;
52405+};
52406+
52407+struct gr_arg {
52408+ struct user_acl_role_db role_db;
52409+ unsigned char pw[GR_PW_LEN];
52410+ unsigned char salt[GR_SALT_LEN];
52411+ unsigned char sum[GR_SHA_LEN];
52412+ unsigned char sp_role[GR_SPROLE_LEN];
52413+ struct sprole_pw *sprole_pws;
52414+ dev_t segv_device;
52415+ ino_t segv_inode;
52416+ uid_t segv_uid;
52417+ __u16 num_sprole_pws;
52418+ __u16 mode;
52419+};
52420+
52421+struct gr_arg_wrapper {
52422+ struct gr_arg *arg;
52423+ __u32 version;
52424+ __u32 size;
52425+};
52426+
52427+struct subject_map {
52428+ struct acl_subject_label *user;
52429+ struct acl_subject_label *kernel;
52430+ struct subject_map *prev;
52431+ struct subject_map *next;
52432+};
52433+
52434+struct acl_subj_map_db {
52435+ struct subject_map **s_hash;
52436+ __u32 s_size;
52437+};
52438+
52439+/* End Data Structures Section */
52440+
52441+/* Hash functions generated by empirical testing by Brad Spengler
52442+ Makes good use of the low bits of the inode. Generally 0-1 times
52443+ in loop for successful match. 0-3 for unsuccessful match.
52444+ Shift/add algorithm with modulus of table size and an XOR*/
52445+
52446+static __inline__ unsigned int
52447+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52448+{
52449+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
52450+}
52451+
52452+ static __inline__ unsigned int
52453+shash(const struct acl_subject_label *userp, const unsigned int sz)
52454+{
52455+ return ((const unsigned long)userp % sz);
52456+}
52457+
52458+static __inline__ unsigned int
52459+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52460+{
52461+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52462+}
52463+
52464+static __inline__ unsigned int
52465+nhash(const char *name, const __u16 len, const unsigned int sz)
52466+{
52467+ return full_name_hash((const unsigned char *)name, len) % sz;
52468+}
52469+
52470+#define FOR_EACH_ROLE_START(role) \
52471+ role = role_list; \
52472+ while (role) {
52473+
52474+#define FOR_EACH_ROLE_END(role) \
52475+ role = role->prev; \
52476+ }
52477+
52478+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52479+ subj = NULL; \
52480+ iter = 0; \
52481+ while (iter < role->subj_hash_size) { \
52482+ if (subj == NULL) \
52483+ subj = role->subj_hash[iter]; \
52484+ if (subj == NULL) { \
52485+ iter++; \
52486+ continue; \
52487+ }
52488+
52489+#define FOR_EACH_SUBJECT_END(subj,iter) \
52490+ subj = subj->next; \
52491+ if (subj == NULL) \
52492+ iter++; \
52493+ }
52494+
52495+
52496+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52497+ subj = role->hash->first; \
52498+ while (subj != NULL) {
52499+
52500+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52501+ subj = subj->next; \
52502+ }
52503+
52504+#endif
52505+
52506diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52507--- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52508+++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52509@@ -0,0 +1,9 @@
52510+#ifndef __GRALLOC_H
52511+#define __GRALLOC_H
52512+
52513+void acl_free_all(void);
52514+int acl_alloc_stack_init(unsigned long size);
52515+void *acl_alloc(unsigned long len);
52516+void *acl_alloc_num(unsigned long num, unsigned long len);
52517+
52518+#endif
52519diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52520--- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52521+++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52522@@ -0,0 +1,140 @@
52523+#ifndef GRDEFS_H
52524+#define GRDEFS_H
52525+
52526+/* Begin grsecurity status declarations */
52527+
52528+enum {
52529+ GR_READY = 0x01,
52530+ GR_STATUS_INIT = 0x00 // disabled state
52531+};
52532+
52533+/* Begin ACL declarations */
52534+
52535+/* Role flags */
52536+
52537+enum {
52538+ GR_ROLE_USER = 0x0001,
52539+ GR_ROLE_GROUP = 0x0002,
52540+ GR_ROLE_DEFAULT = 0x0004,
52541+ GR_ROLE_SPECIAL = 0x0008,
52542+ GR_ROLE_AUTH = 0x0010,
52543+ GR_ROLE_NOPW = 0x0020,
52544+ GR_ROLE_GOD = 0x0040,
52545+ GR_ROLE_LEARN = 0x0080,
52546+ GR_ROLE_TPE = 0x0100,
52547+ GR_ROLE_DOMAIN = 0x0200,
52548+ GR_ROLE_PAM = 0x0400,
52549+ GR_ROLE_PERSIST = 0x0800
52550+};
52551+
52552+/* ACL Subject and Object mode flags */
52553+enum {
52554+ GR_DELETED = 0x80000000
52555+};
52556+
52557+/* ACL Object-only mode flags */
52558+enum {
52559+ GR_READ = 0x00000001,
52560+ GR_APPEND = 0x00000002,
52561+ GR_WRITE = 0x00000004,
52562+ GR_EXEC = 0x00000008,
52563+ GR_FIND = 0x00000010,
52564+ GR_INHERIT = 0x00000020,
52565+ GR_SETID = 0x00000040,
52566+ GR_CREATE = 0x00000080,
52567+ GR_DELETE = 0x00000100,
52568+ GR_LINK = 0x00000200,
52569+ GR_AUDIT_READ = 0x00000400,
52570+ GR_AUDIT_APPEND = 0x00000800,
52571+ GR_AUDIT_WRITE = 0x00001000,
52572+ GR_AUDIT_EXEC = 0x00002000,
52573+ GR_AUDIT_FIND = 0x00004000,
52574+ GR_AUDIT_INHERIT= 0x00008000,
52575+ GR_AUDIT_SETID = 0x00010000,
52576+ GR_AUDIT_CREATE = 0x00020000,
52577+ GR_AUDIT_DELETE = 0x00040000,
52578+ GR_AUDIT_LINK = 0x00080000,
52579+ GR_PTRACERD = 0x00100000,
52580+ GR_NOPTRACE = 0x00200000,
52581+ GR_SUPPRESS = 0x00400000,
52582+ GR_NOLEARN = 0x00800000,
52583+ GR_INIT_TRANSFER= 0x01000000
52584+};
52585+
52586+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52587+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52588+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52589+
52590+/* ACL subject-only mode flags */
52591+enum {
52592+ GR_KILL = 0x00000001,
52593+ GR_VIEW = 0x00000002,
52594+ GR_PROTECTED = 0x00000004,
52595+ GR_LEARN = 0x00000008,
52596+ GR_OVERRIDE = 0x00000010,
52597+ /* just a placeholder, this mode is only used in userspace */
52598+ GR_DUMMY = 0x00000020,
52599+ GR_PROTSHM = 0x00000040,
52600+ GR_KILLPROC = 0x00000080,
52601+ GR_KILLIPPROC = 0x00000100,
52602+ /* just a placeholder, this mode is only used in userspace */
52603+ GR_NOTROJAN = 0x00000200,
52604+ GR_PROTPROCFD = 0x00000400,
52605+ GR_PROCACCT = 0x00000800,
52606+ GR_RELAXPTRACE = 0x00001000,
52607+ GR_NESTED = 0x00002000,
52608+ GR_INHERITLEARN = 0x00004000,
52609+ GR_PROCFIND = 0x00008000,
52610+ GR_POVERRIDE = 0x00010000,
52611+ GR_KERNELAUTH = 0x00020000,
52612+ GR_ATSECURE = 0x00040000,
52613+ GR_SHMEXEC = 0x00080000
52614+};
52615+
52616+enum {
52617+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52618+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52619+ GR_PAX_ENABLE_MPROTECT = 0x0004,
52620+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
52621+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52622+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52623+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52624+ GR_PAX_DISABLE_MPROTECT = 0x0400,
52625+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
52626+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52627+};
52628+
52629+enum {
52630+ GR_ID_USER = 0x01,
52631+ GR_ID_GROUP = 0x02,
52632+};
52633+
52634+enum {
52635+ GR_ID_ALLOW = 0x01,
52636+ GR_ID_DENY = 0x02,
52637+};
52638+
52639+#define GR_CRASH_RES 31
52640+#define GR_UIDTABLE_MAX 500
52641+
52642+/* begin resource learning section */
52643+enum {
52644+ GR_RLIM_CPU_BUMP = 60,
52645+ GR_RLIM_FSIZE_BUMP = 50000,
52646+ GR_RLIM_DATA_BUMP = 10000,
52647+ GR_RLIM_STACK_BUMP = 1000,
52648+ GR_RLIM_CORE_BUMP = 10000,
52649+ GR_RLIM_RSS_BUMP = 500000,
52650+ GR_RLIM_NPROC_BUMP = 1,
52651+ GR_RLIM_NOFILE_BUMP = 5,
52652+ GR_RLIM_MEMLOCK_BUMP = 50000,
52653+ GR_RLIM_AS_BUMP = 500000,
52654+ GR_RLIM_LOCKS_BUMP = 2,
52655+ GR_RLIM_SIGPENDING_BUMP = 5,
52656+ GR_RLIM_MSGQUEUE_BUMP = 10000,
52657+ GR_RLIM_NICE_BUMP = 1,
52658+ GR_RLIM_RTPRIO_BUMP = 1,
52659+ GR_RLIM_RTTIME_BUMP = 1000000
52660+};
52661+
52662+#endif
52663diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52664--- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52665+++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52666@@ -0,0 +1,219 @@
52667+#ifndef __GRINTERNAL_H
52668+#define __GRINTERNAL_H
52669+
52670+#ifdef CONFIG_GRKERNSEC
52671+
52672+#include <linux/fs.h>
52673+#include <linux/mnt_namespace.h>
52674+#include <linux/nsproxy.h>
52675+#include <linux/gracl.h>
52676+#include <linux/grdefs.h>
52677+#include <linux/grmsg.h>
52678+
52679+void gr_add_learn_entry(const char *fmt, ...)
52680+ __attribute__ ((format (printf, 1, 2)));
52681+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52682+ const struct vfsmount *mnt);
52683+__u32 gr_check_create(const struct dentry *new_dentry,
52684+ const struct dentry *parent,
52685+ const struct vfsmount *mnt, const __u32 mode);
52686+int gr_check_protected_task(const struct task_struct *task);
52687+__u32 to_gr_audit(const __u32 reqmode);
52688+int gr_set_acls(const int type);
52689+int gr_apply_subject_to_task(struct task_struct *task);
52690+int gr_acl_is_enabled(void);
52691+char gr_roletype_to_char(void);
52692+
52693+void gr_handle_alertkill(struct task_struct *task);
52694+char *gr_to_filename(const struct dentry *dentry,
52695+ const struct vfsmount *mnt);
52696+char *gr_to_filename1(const struct dentry *dentry,
52697+ const struct vfsmount *mnt);
52698+char *gr_to_filename2(const struct dentry *dentry,
52699+ const struct vfsmount *mnt);
52700+char *gr_to_filename3(const struct dentry *dentry,
52701+ const struct vfsmount *mnt);
52702+
52703+extern int grsec_enable_harden_ptrace;
52704+extern int grsec_enable_link;
52705+extern int grsec_enable_fifo;
52706+extern int grsec_enable_execve;
52707+extern int grsec_enable_shm;
52708+extern int grsec_enable_execlog;
52709+extern int grsec_enable_signal;
52710+extern int grsec_enable_audit_ptrace;
52711+extern int grsec_enable_forkfail;
52712+extern int grsec_enable_time;
52713+extern int grsec_enable_rofs;
52714+extern int grsec_enable_chroot_shmat;
52715+extern int grsec_enable_chroot_mount;
52716+extern int grsec_enable_chroot_double;
52717+extern int grsec_enable_chroot_pivot;
52718+extern int grsec_enable_chroot_chdir;
52719+extern int grsec_enable_chroot_chmod;
52720+extern int grsec_enable_chroot_mknod;
52721+extern int grsec_enable_chroot_fchdir;
52722+extern int grsec_enable_chroot_nice;
52723+extern int grsec_enable_chroot_execlog;
52724+extern int grsec_enable_chroot_caps;
52725+extern int grsec_enable_chroot_sysctl;
52726+extern int grsec_enable_chroot_unix;
52727+extern int grsec_enable_tpe;
52728+extern int grsec_tpe_gid;
52729+extern int grsec_enable_tpe_all;
52730+extern int grsec_enable_tpe_invert;
52731+extern int grsec_enable_socket_all;
52732+extern int grsec_socket_all_gid;
52733+extern int grsec_enable_socket_client;
52734+extern int grsec_socket_client_gid;
52735+extern int grsec_enable_socket_server;
52736+extern int grsec_socket_server_gid;
52737+extern int grsec_audit_gid;
52738+extern int grsec_enable_group;
52739+extern int grsec_enable_audit_textrel;
52740+extern int grsec_enable_log_rwxmaps;
52741+extern int grsec_enable_mount;
52742+extern int grsec_enable_chdir;
52743+extern int grsec_resource_logging;
52744+extern int grsec_enable_blackhole;
52745+extern int grsec_lastack_retries;
52746+extern int grsec_enable_brute;
52747+extern int grsec_lock;
52748+
52749+extern spinlock_t grsec_alert_lock;
52750+extern unsigned long grsec_alert_wtime;
52751+extern unsigned long grsec_alert_fyet;
52752+
52753+extern spinlock_t grsec_audit_lock;
52754+
52755+extern rwlock_t grsec_exec_file_lock;
52756+
52757+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52758+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52759+ (tsk)->exec_file->f_vfsmnt) : "/")
52760+
52761+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52762+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52763+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52764+
52765+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52766+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
52767+ (tsk)->exec_file->f_vfsmnt) : "/")
52768+
52769+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52770+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52771+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52772+
52773+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52774+
52775+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52776+
52777+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52778+ (task)->pid, (cred)->uid, \
52779+ (cred)->euid, (cred)->gid, (cred)->egid, \
52780+ gr_parent_task_fullpath(task), \
52781+ (task)->real_parent->comm, (task)->real_parent->pid, \
52782+ (pcred)->uid, (pcred)->euid, \
52783+ (pcred)->gid, (pcred)->egid
52784+
52785+#define GR_CHROOT_CAPS {{ \
52786+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52787+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52788+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52789+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52790+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52791+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52792+
52793+#define security_learn(normal_msg,args...) \
52794+({ \
52795+ read_lock(&grsec_exec_file_lock); \
52796+ gr_add_learn_entry(normal_msg "\n", ## args); \
52797+ read_unlock(&grsec_exec_file_lock); \
52798+})
52799+
52800+enum {
52801+ GR_DO_AUDIT,
52802+ GR_DONT_AUDIT,
52803+ /* used for non-audit messages that we shouldn't kill the task on */
52804+ GR_DONT_AUDIT_GOOD
52805+};
52806+
52807+enum {
52808+ GR_TTYSNIFF,
52809+ GR_RBAC,
52810+ GR_RBAC_STR,
52811+ GR_STR_RBAC,
52812+ GR_RBAC_MODE2,
52813+ GR_RBAC_MODE3,
52814+ GR_FILENAME,
52815+ GR_SYSCTL_HIDDEN,
52816+ GR_NOARGS,
52817+ GR_ONE_INT,
52818+ GR_ONE_INT_TWO_STR,
52819+ GR_ONE_STR,
52820+ GR_STR_INT,
52821+ GR_TWO_STR_INT,
52822+ GR_TWO_INT,
52823+ GR_TWO_U64,
52824+ GR_THREE_INT,
52825+ GR_FIVE_INT_TWO_STR,
52826+ GR_TWO_STR,
52827+ GR_THREE_STR,
52828+ GR_FOUR_STR,
52829+ GR_STR_FILENAME,
52830+ GR_FILENAME_STR,
52831+ GR_FILENAME_TWO_INT,
52832+ GR_FILENAME_TWO_INT_STR,
52833+ GR_TEXTREL,
52834+ GR_PTRACE,
52835+ GR_RESOURCE,
52836+ GR_CAP,
52837+ GR_SIG,
52838+ GR_SIG2,
52839+ GR_CRASH1,
52840+ GR_CRASH2,
52841+ GR_PSACCT,
52842+ GR_RWXMAP
52843+};
52844+
52845+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52846+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52847+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52848+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52849+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52850+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52851+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52852+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52853+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52854+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52855+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52856+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52857+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52858+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52859+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52860+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52861+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52862+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52863+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52864+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52865+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52866+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52867+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52868+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52869+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52870+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52871+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52872+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52873+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52874+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52875+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52876+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52877+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52878+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52879+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52880+
52881+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52882+
52883+#endif
52884+
52885+#endif
52886diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52887--- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52888+++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52889@@ -0,0 +1,108 @@
52890+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52891+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52892+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52893+#define GR_STOPMOD_MSG "denied modification of module state by "
52894+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52895+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52896+#define GR_IOPERM_MSG "denied use of ioperm() by "
52897+#define GR_IOPL_MSG "denied use of iopl() by "
52898+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52899+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52900+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52901+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52902+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52903+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52904+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52905+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52906+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52907+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52908+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52909+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52910+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52911+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52912+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52913+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52914+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52915+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52916+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52917+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52918+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52919+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52920+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52921+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52922+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52923+#define GR_NPROC_MSG "denied overstep of process limit by "
52924+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52925+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52926+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52927+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52928+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52929+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52930+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52931+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52932+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52933+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52934+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52935+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52936+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52937+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52938+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52939+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52940+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52941+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52942+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52943+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52944+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52945+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52946+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52947+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52948+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52949+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52950+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52951+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52952+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52953+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52954+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52955+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52956+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52957+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52958+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52959+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52960+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52961+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52962+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52963+#define GR_FAILFORK_MSG "failed fork with errno %s by "
52964+#define GR_NICE_CHROOT_MSG "denied priority change by "
52965+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52966+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52967+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52968+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52969+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52970+#define GR_TIME_MSG "time set by "
52971+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52972+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52973+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52974+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52975+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52976+#define GR_BIND_MSG "denied bind() by "
52977+#define GR_CONNECT_MSG "denied connect() by "
52978+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52979+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52980+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52981+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52982+#define GR_CAP_ACL_MSG "use of %s denied for "
52983+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52984+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52985+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52986+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52987+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52988+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52989+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52990+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52991+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52992+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52993+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52994+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52995+#define GR_VM86_MSG "denied use of vm86 by "
52996+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52997+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52998diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
52999--- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
53000+++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
53001@@ -0,0 +1,218 @@
53002+#ifndef GR_SECURITY_H
53003+#define GR_SECURITY_H
53004+#include <linux/fs.h>
53005+#include <linux/fs_struct.h>
53006+#include <linux/binfmts.h>
53007+#include <linux/gracl.h>
53008+#include <linux/compat.h>
53009+
53010+/* notify of brain-dead configs */
53011+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53012+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
53013+#endif
53014+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
53015+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
53016+#endif
53017+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53018+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53019+#endif
53020+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53021+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53022+#endif
53023+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
53024+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
53025+#endif
53026+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
53027+#error "CONFIG_PAX enabled, but no PaX options are enabled."
53028+#endif
53029+
53030+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
53031+void gr_handle_brute_check(void);
53032+void gr_handle_kernel_exploit(void);
53033+int gr_process_user_ban(void);
53034+
53035+char gr_roletype_to_char(void);
53036+
53037+int gr_acl_enable_at_secure(void);
53038+
53039+int gr_check_user_change(int real, int effective, int fs);
53040+int gr_check_group_change(int real, int effective, int fs);
53041+
53042+void gr_del_task_from_ip_table(struct task_struct *p);
53043+
53044+int gr_pid_is_chrooted(struct task_struct *p);
53045+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
53046+int gr_handle_chroot_nice(void);
53047+int gr_handle_chroot_sysctl(const int op);
53048+int gr_handle_chroot_setpriority(struct task_struct *p,
53049+ const int niceval);
53050+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
53051+int gr_handle_chroot_chroot(const struct dentry *dentry,
53052+ const struct vfsmount *mnt);
53053+int gr_handle_chroot_caps(struct path *path);
53054+void gr_handle_chroot_chdir(struct path *path);
53055+int gr_handle_chroot_chmod(const struct dentry *dentry,
53056+ const struct vfsmount *mnt, const int mode);
53057+int gr_handle_chroot_mknod(const struct dentry *dentry,
53058+ const struct vfsmount *mnt, const int mode);
53059+int gr_handle_chroot_mount(const struct dentry *dentry,
53060+ const struct vfsmount *mnt,
53061+ const char *dev_name);
53062+int gr_handle_chroot_pivot(void);
53063+int gr_handle_chroot_unix(const pid_t pid);
53064+
53065+int gr_handle_rawio(const struct inode *inode);
53066+int gr_handle_nproc(void);
53067+
53068+void gr_handle_ioperm(void);
53069+void gr_handle_iopl(void);
53070+
53071+int gr_tpe_allow(const struct file *file);
53072+
53073+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
53074+void gr_clear_chroot_entries(struct task_struct *task);
53075+
53076+void gr_log_forkfail(const int retval);
53077+void gr_log_timechange(void);
53078+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
53079+void gr_log_chdir(const struct dentry *dentry,
53080+ const struct vfsmount *mnt);
53081+void gr_log_chroot_exec(const struct dentry *dentry,
53082+ const struct vfsmount *mnt);
53083+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
53084+#ifdef CONFIG_COMPAT
53085+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
53086+#endif
53087+void gr_log_remount(const char *devname, const int retval);
53088+void gr_log_unmount(const char *devname, const int retval);
53089+void gr_log_mount(const char *from, const char *to, const int retval);
53090+void gr_log_textrel(struct vm_area_struct *vma);
53091+void gr_log_rwxmmap(struct file *file);
53092+void gr_log_rwxmprotect(struct file *file);
53093+
53094+int gr_handle_follow_link(const struct inode *parent,
53095+ const struct inode *inode,
53096+ const struct dentry *dentry,
53097+ const struct vfsmount *mnt);
53098+int gr_handle_fifo(const struct dentry *dentry,
53099+ const struct vfsmount *mnt,
53100+ const struct dentry *dir, const int flag,
53101+ const int acc_mode);
53102+int gr_handle_hardlink(const struct dentry *dentry,
53103+ const struct vfsmount *mnt,
53104+ struct inode *inode,
53105+ const int mode, const char *to);
53106+
53107+int gr_is_capable(const int cap);
53108+int gr_is_capable_nolog(const int cap);
53109+void gr_learn_resource(const struct task_struct *task, const int limit,
53110+ const unsigned long wanted, const int gt);
53111+void gr_copy_label(struct task_struct *tsk);
53112+void gr_handle_crash(struct task_struct *task, const int sig);
53113+int gr_handle_signal(const struct task_struct *p, const int sig);
53114+int gr_check_crash_uid(const uid_t uid);
53115+int gr_check_protected_task(const struct task_struct *task);
53116+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
53117+int gr_acl_handle_mmap(const struct file *file,
53118+ const unsigned long prot);
53119+int gr_acl_handle_mprotect(const struct file *file,
53120+ const unsigned long prot);
53121+int gr_check_hidden_task(const struct task_struct *tsk);
53122+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
53123+ const struct vfsmount *mnt);
53124+__u32 gr_acl_handle_utime(const struct dentry *dentry,
53125+ const struct vfsmount *mnt);
53126+__u32 gr_acl_handle_access(const struct dentry *dentry,
53127+ const struct vfsmount *mnt, const int fmode);
53128+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
53129+ const struct vfsmount *mnt, mode_t mode);
53130+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
53131+ const struct vfsmount *mnt, mode_t mode);
53132+__u32 gr_acl_handle_chown(const struct dentry *dentry,
53133+ const struct vfsmount *mnt);
53134+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
53135+ const struct vfsmount *mnt);
53136+int gr_handle_ptrace(struct task_struct *task, const long request);
53137+int gr_handle_proc_ptrace(struct task_struct *task);
53138+__u32 gr_acl_handle_execve(const struct dentry *dentry,
53139+ const struct vfsmount *mnt);
53140+int gr_check_crash_exec(const struct file *filp);
53141+int gr_acl_is_enabled(void);
53142+void gr_set_kernel_label(struct task_struct *task);
53143+void gr_set_role_label(struct task_struct *task, const uid_t uid,
53144+ const gid_t gid);
53145+int gr_set_proc_label(const struct dentry *dentry,
53146+ const struct vfsmount *mnt,
53147+ const int unsafe_share);
53148+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53149+ const struct vfsmount *mnt);
53150+__u32 gr_acl_handle_open(const struct dentry *dentry,
53151+ const struct vfsmount *mnt, const int fmode);
53152+__u32 gr_acl_handle_creat(const struct dentry *dentry,
53153+ const struct dentry *p_dentry,
53154+ const struct vfsmount *p_mnt, const int fmode,
53155+ const int imode);
53156+void gr_handle_create(const struct dentry *dentry,
53157+ const struct vfsmount *mnt);
53158+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53159+ const struct dentry *parent_dentry,
53160+ const struct vfsmount *parent_mnt,
53161+ const int mode);
53162+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53163+ const struct dentry *parent_dentry,
53164+ const struct vfsmount *parent_mnt);
53165+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53166+ const struct vfsmount *mnt);
53167+void gr_handle_delete(const ino_t ino, const dev_t dev);
53168+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53169+ const struct vfsmount *mnt);
53170+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53171+ const struct dentry *parent_dentry,
53172+ const struct vfsmount *parent_mnt,
53173+ const char *from);
53174+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53175+ const struct dentry *parent_dentry,
53176+ const struct vfsmount *parent_mnt,
53177+ const struct dentry *old_dentry,
53178+ const struct vfsmount *old_mnt, const char *to);
53179+int gr_acl_handle_rename(struct dentry *new_dentry,
53180+ struct dentry *parent_dentry,
53181+ const struct vfsmount *parent_mnt,
53182+ struct dentry *old_dentry,
53183+ struct inode *old_parent_inode,
53184+ struct vfsmount *old_mnt, const char *newname);
53185+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53186+ struct dentry *old_dentry,
53187+ struct dentry *new_dentry,
53188+ struct vfsmount *mnt, const __u8 replace);
53189+__u32 gr_check_link(const struct dentry *new_dentry,
53190+ const struct dentry *parent_dentry,
53191+ const struct vfsmount *parent_mnt,
53192+ const struct dentry *old_dentry,
53193+ const struct vfsmount *old_mnt);
53194+int gr_acl_handle_filldir(const struct file *file, const char *name,
53195+ const unsigned int namelen, const ino_t ino);
53196+
53197+__u32 gr_acl_handle_unix(const struct dentry *dentry,
53198+ const struct vfsmount *mnt);
53199+void gr_acl_handle_exit(void);
53200+void gr_acl_handle_psacct(struct task_struct *task, const long code);
53201+int gr_acl_handle_procpidmem(const struct task_struct *task);
53202+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53203+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53204+void gr_audit_ptrace(struct task_struct *task);
53205+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53206+
53207+#ifdef CONFIG_GRKERNSEC
53208+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53209+void gr_handle_vm86(void);
53210+void gr_handle_mem_readwrite(u64 from, u64 to);
53211+
53212+extern int grsec_enable_dmesg;
53213+extern int grsec_disable_privio;
53214+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53215+extern int grsec_enable_chroot_findtask;
53216+#endif
53217+#endif
53218+
53219+#endif
53220diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53221--- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53222+++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53223@@ -0,0 +1,19 @@
53224+#ifndef __GRSOCK_H
53225+#define __GRSOCK_H
53226+
53227+extern void gr_attach_curr_ip(const struct sock *sk);
53228+extern int gr_handle_sock_all(const int family, const int type,
53229+ const int protocol);
53230+extern int gr_handle_sock_server(const struct sockaddr *sck);
53231+extern int gr_handle_sock_server_other(const struct sock *sck);
53232+extern int gr_handle_sock_client(const struct sockaddr *sck);
53233+extern int gr_search_connect(struct socket * sock,
53234+ struct sockaddr_in * addr);
53235+extern int gr_search_bind(struct socket * sock,
53236+ struct sockaddr_in * addr);
53237+extern int gr_search_listen(struct socket * sock);
53238+extern int gr_search_accept(struct socket * sock);
53239+extern int gr_search_socket(const int domain, const int type,
53240+ const int protocol);
53241+
53242+#endif
53243diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53244--- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53245+++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53246@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53247 kunmap_atomic(kaddr, KM_USER0);
53248 }
53249
53250+static inline void sanitize_highpage(struct page *page)
53251+{
53252+ void *kaddr;
53253+ unsigned long flags;
53254+
53255+ local_irq_save(flags);
53256+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
53257+ clear_page(kaddr);
53258+ kunmap_atomic(kaddr, KM_CLEARPAGE);
53259+ local_irq_restore(flags);
53260+}
53261+
53262 static inline void zero_user_segments(struct page *page,
53263 unsigned start1, unsigned end1,
53264 unsigned start2, unsigned end2)
53265diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53266--- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53267+++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53268@@ -346,6 +346,7 @@ struct i2c_algorithm {
53269 /* To determine what the adapter supports */
53270 u32 (*functionality) (struct i2c_adapter *);
53271 };
53272+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53273
53274 /*
53275 * i2c_adapter is the structure used to identify a physical i2c bus along
53276diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53277--- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53278+++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53279@@ -564,7 +564,7 @@ struct i2o_controller {
53280 struct i2o_device *exec; /* Executive */
53281 #if BITS_PER_LONG == 64
53282 spinlock_t context_list_lock; /* lock for context_list */
53283- atomic_t context_list_counter; /* needed for unique contexts */
53284+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53285 struct list_head context_list; /* list of context id's
53286 and pointers */
53287 #endif
53288diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53289--- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53290+++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53291@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53292
53293 /* Each module must use one module_init(). */
53294 #define module_init(initfn) \
53295- static inline initcall_t __inittest(void) \
53296+ static inline __used initcall_t __inittest(void) \
53297 { return initfn; } \
53298 int init_module(void) __attribute__((alias(#initfn)));
53299
53300 /* This is only required if you want to be unloadable. */
53301 #define module_exit(exitfn) \
53302- static inline exitcall_t __exittest(void) \
53303+ static inline __used exitcall_t __exittest(void) \
53304 { return exitfn; } \
53305 void cleanup_module(void) __attribute__((alias(#exitfn)));
53306
53307diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53308--- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53309+++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53310@@ -83,6 +83,12 @@ extern struct group_info init_groups;
53311 #define INIT_IDS
53312 #endif
53313
53314+#ifdef CONFIG_X86
53315+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53316+#else
53317+#define INIT_TASK_THREAD_INFO
53318+#endif
53319+
53320 /*
53321 * Because of the reduced scope of CAP_SETPCAP when filesystem
53322 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53323@@ -163,6 +169,7 @@ extern struct cred init_cred;
53324 RCU_INIT_POINTER(.cred, &init_cred), \
53325 .comm = "swapper", \
53326 .thread = INIT_THREAD, \
53327+ INIT_TASK_THREAD_INFO \
53328 .fs = &init_fs, \
53329 .files = &init_files, \
53330 .signal = &init_signals, \
53331diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53332--- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53333+++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53334@@ -296,7 +296,7 @@ struct iommu_flush {
53335 u8 fm, u64 type);
53336 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53337 unsigned int size_order, u64 type);
53338-};
53339+} __no_const;
53340
53341 enum {
53342 SR_DMAR_FECTL_REG,
53343diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53344--- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53345+++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53346@@ -422,7 +422,7 @@ enum
53347 /* map softirq index to softirq name. update 'softirq_to_name' in
53348 * kernel/softirq.c when adding a new softirq.
53349 */
53350-extern char *softirq_to_name[NR_SOFTIRQS];
53351+extern const char * const softirq_to_name[NR_SOFTIRQS];
53352
53353 /* softirq mask and active fields moved to irq_cpustat_t in
53354 * asm/hardirq.h to get better cache usage. KAO
53355@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53356
53357 struct softirq_action
53358 {
53359- void (*action)(struct softirq_action *);
53360+ void (*action)(void);
53361 };
53362
53363 asmlinkage void do_softirq(void);
53364 asmlinkage void __do_softirq(void);
53365-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53366+extern void open_softirq(int nr, void (*action)(void));
53367 extern void softirq_init(void);
53368 static inline void __raise_softirq_irqoff(unsigned int nr)
53369 {
53370diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53371--- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53372+++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53373@@ -15,7 +15,8 @@
53374
53375 struct module;
53376
53377-#ifdef CONFIG_KALLSYMS
53378+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53379+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53380 /* Lookup the address for a symbol. Returns 0 if not found. */
53381 unsigned long kallsyms_lookup_name(const char *name);
53382
53383@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53384 /* Stupid that this does nothing, but I didn't create this mess. */
53385 #define __print_symbol(fmt, addr)
53386 #endif /*CONFIG_KALLSYMS*/
53387+#else /* when included by kallsyms.c, vsnprintf.c, or
53388+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53389+extern void __print_symbol(const char *fmt, unsigned long address);
53390+extern int sprint_backtrace(char *buffer, unsigned long address);
53391+extern int sprint_symbol(char *buffer, unsigned long address);
53392+const char *kallsyms_lookup(unsigned long addr,
53393+ unsigned long *symbolsize,
53394+ unsigned long *offset,
53395+ char **modname, char *namebuf);
53396+#endif
53397
53398 /* This macro allows us to keep printk typechecking */
53399 static void __check_printsym_format(const char *fmt, ...)
53400diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53401--- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53402+++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53403@@ -53,7 +53,7 @@ extern int kgdb_connected;
53404 extern int kgdb_io_module_registered;
53405
53406 extern atomic_t kgdb_setting_breakpoint;
53407-extern atomic_t kgdb_cpu_doing_single_step;
53408+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53409
53410 extern struct task_struct *kgdb_usethread;
53411 extern struct task_struct *kgdb_contthread;
53412@@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53413 * hardware debug registers.
53414 */
53415 struct kgdb_arch {
53416- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53417- unsigned long flags;
53418+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53419+ const unsigned long flags;
53420
53421 int (*set_breakpoint)(unsigned long, char *);
53422 int (*remove_breakpoint)(unsigned long, char *);
53423@@ -268,14 +268,14 @@ struct kgdb_arch {
53424 * not a console
53425 */
53426 struct kgdb_io {
53427- const char *name;
53428+ const char * const name;
53429 int (*read_char) (void);
53430 void (*write_char) (u8);
53431 void (*flush) (void);
53432 int (*init) (void);
53433 void (*pre_exception) (void);
53434 void (*post_exception) (void);
53435- int is_console;
53436+ const int is_console;
53437 };
53438
53439 extern struct kgdb_arch arch_kgdb_ops;
53440diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53441--- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53442+++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53443@@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53444 * usually useless though. */
53445 extern int __request_module(bool wait, const char *name, ...) \
53446 __attribute__((format(printf, 2, 3)));
53447+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53448+ __attribute__((format(printf, 3, 4)));
53449 #define request_module(mod...) __request_module(true, mod)
53450 #define request_module_nowait(mod...) __request_module(false, mod)
53451 #define try_then_request_module(x, mod...) \
53452diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53453--- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53454+++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53455@@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53456 void vcpu_load(struct kvm_vcpu *vcpu);
53457 void vcpu_put(struct kvm_vcpu *vcpu);
53458
53459-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53460+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53461 struct module *module);
53462 void kvm_exit(void);
53463
53464@@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53465 struct kvm_guest_debug *dbg);
53466 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53467
53468-int kvm_arch_init(void *opaque);
53469+int kvm_arch_init(const void *opaque);
53470 void kvm_arch_exit(void);
53471
53472 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53473diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53474--- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53475+++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53476@@ -898,7 +898,7 @@ struct ata_port_operations {
53477 * ->inherits must be the last field and all the preceding
53478 * fields must be pointers.
53479 */
53480- const struct ata_port_operations *inherits;
53481+ const struct ata_port_operations * const inherits;
53482 };
53483
53484 struct ata_port_info {
53485diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53486--- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53487+++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53488@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53489 int region);
53490 void * (*mca_transform_memory)(struct mca_device *,
53491 void *memory);
53492-};
53493+} __no_const;
53494
53495 struct mca_bus {
53496 u64 default_dma_mask;
53497diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53498--- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53499+++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53500@@ -142,7 +142,7 @@ struct memory_accessor {
53501 size_t count);
53502 ssize_t (*write)(struct memory_accessor *, const char *buf,
53503 off_t offset, size_t count);
53504-};
53505+} __no_const;
53506
53507 /*
53508 * Kernel text modification mutex, used for code patching. Users of this lock
53509diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53510--- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53511+++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53512@@ -226,6 +226,7 @@ struct abx500_ops {
53513 int (*event_registers_startup_state_get) (struct device *, u8 *);
53514 int (*startup_irq_enabled) (struct device *, unsigned int);
53515 };
53516+typedef struct abx500_ops __no_const abx500_ops_no_const;
53517
53518 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53519 void abx500_remove_ops(struct device *dev);
53520diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53521--- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53522+++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53523@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53524
53525 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53526 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53527+
53528+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53529+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53530+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53531+#else
53532 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53533+#endif
53534+
53535 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53536 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53537
53538@@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53539 int set_page_dirty_lock(struct page *page);
53540 int clear_page_dirty_for_io(struct page *page);
53541
53542-/* Is the vma a continuation of the stack vma above it? */
53543-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53544-{
53545- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53546-}
53547-
53548-static inline int stack_guard_page_start(struct vm_area_struct *vma,
53549- unsigned long addr)
53550-{
53551- return (vma->vm_flags & VM_GROWSDOWN) &&
53552- (vma->vm_start == addr) &&
53553- !vma_growsdown(vma->vm_prev, addr);
53554-}
53555-
53556-/* Is the vma a continuation of the stack vma below it? */
53557-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53558-{
53559- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53560-}
53561-
53562-static inline int stack_guard_page_end(struct vm_area_struct *vma,
53563- unsigned long addr)
53564-{
53565- return (vma->vm_flags & VM_GROWSUP) &&
53566- (vma->vm_end == addr) &&
53567- !vma_growsup(vma->vm_next, addr);
53568-}
53569-
53570 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53571 unsigned long old_addr, struct vm_area_struct *new_vma,
53572 unsigned long new_addr, unsigned long len);
53573@@ -1189,6 +1168,15 @@ struct shrinker {
53574 extern void register_shrinker(struct shrinker *);
53575 extern void unregister_shrinker(struct shrinker *);
53576
53577+#ifdef CONFIG_MMU
53578+pgprot_t vm_get_page_prot(unsigned long vm_flags);
53579+#else
53580+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53581+{
53582+ return __pgprot(0);
53583+}
53584+#endif
53585+
53586 int vma_wants_writenotify(struct vm_area_struct *vma);
53587
53588 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53589@@ -1476,6 +1464,7 @@ out:
53590 }
53591
53592 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53593+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53594
53595 extern unsigned long do_brk(unsigned long, unsigned long);
53596
53597@@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53598 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53599 struct vm_area_struct **pprev);
53600
53601+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53602+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53603+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53604+
53605 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53606 NULL if none. Assume start_addr < end_addr. */
53607 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53608@@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53609 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53610 }
53611
53612-#ifdef CONFIG_MMU
53613-pgprot_t vm_get_page_prot(unsigned long vm_flags);
53614-#else
53615-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53616-{
53617- return __pgprot(0);
53618-}
53619-#endif
53620-
53621 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53622 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53623 unsigned long pfn, unsigned long size, pgprot_t);
53624@@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53625 extern int sysctl_memory_failure_early_kill;
53626 extern int sysctl_memory_failure_recovery;
53627 extern void shake_page(struct page *p, int access);
53628-extern atomic_long_t mce_bad_pages;
53629+extern atomic_long_unchecked_t mce_bad_pages;
53630 extern int soft_offline_page(struct page *page, int flags);
53631
53632 extern void dump_page(struct page *page);
53633@@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53634 unsigned int pages_per_huge_page);
53635 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53636
53637+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53638+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53639+#else
53640+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53641+#endif
53642+
53643 #endif /* __KERNEL__ */
53644 #endif /* _LINUX_MM_H */
53645diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53646--- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53647+++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53648@@ -183,6 +183,8 @@ struct vm_area_struct {
53649 #ifdef CONFIG_NUMA
53650 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53651 #endif
53652+
53653+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53654 };
53655
53656 struct core_thread {
53657@@ -317,6 +319,24 @@ struct mm_struct {
53658 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53659 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53660 #endif
53661+
53662+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53663+ unsigned long pax_flags;
53664+#endif
53665+
53666+#ifdef CONFIG_PAX_DLRESOLVE
53667+ unsigned long call_dl_resolve;
53668+#endif
53669+
53670+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53671+ unsigned long call_syscall;
53672+#endif
53673+
53674+#ifdef CONFIG_PAX_ASLR
53675+ unsigned long delta_mmap; /* randomized offset */
53676+ unsigned long delta_stack; /* randomized offset */
53677+#endif
53678+
53679 };
53680
53681 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53682diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53683--- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53684+++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53685@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53686 */
53687 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53688 ({ \
53689- pte_t __pte; \
53690+ pte_t ___pte; \
53691 struct vm_area_struct *___vma = __vma; \
53692 unsigned long ___address = __address; \
53693- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53694+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53695 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53696- __pte; \
53697+ ___pte; \
53698 })
53699
53700 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53701diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53702--- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53703+++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53704@@ -355,7 +355,7 @@ struct zone {
53705 unsigned long flags; /* zone flags, see below */
53706
53707 /* Zone statistics */
53708- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53709+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53710
53711 /*
53712 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53713diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53714--- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53715+++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53716@@ -12,7 +12,7 @@
53717 typedef unsigned long kernel_ulong_t;
53718 #endif
53719
53720-#define PCI_ANY_ID (~0)
53721+#define PCI_ANY_ID ((__u16)~0)
53722
53723 struct pci_device_id {
53724 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53725@@ -131,7 +131,7 @@ struct usb_device_id {
53726 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53727 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53728
53729-#define HID_ANY_ID (~0)
53730+#define HID_ANY_ID (~0U)
53731
53732 struct hid_device_id {
53733 __u16 bus;
53734diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53735--- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53736+++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53737@@ -16,6 +16,7 @@
53738 #include <linux/kobject.h>
53739 #include <linux/moduleparam.h>
53740 #include <linux/tracepoint.h>
53741+#include <linux/fs.h>
53742
53743 #include <linux/percpu.h>
53744 #include <asm/module.h>
53745@@ -324,19 +325,16 @@ struct module
53746 int (*init)(void);
53747
53748 /* If this is non-NULL, vfree after init() returns */
53749- void *module_init;
53750+ void *module_init_rx, *module_init_rw;
53751
53752 /* Here is the actual code + data, vfree'd on unload. */
53753- void *module_core;
53754+ void *module_core_rx, *module_core_rw;
53755
53756 /* Here are the sizes of the init and core sections */
53757- unsigned int init_size, core_size;
53758+ unsigned int init_size_rw, core_size_rw;
53759
53760 /* The size of the executable code in each section. */
53761- unsigned int init_text_size, core_text_size;
53762-
53763- /* Size of RO sections of the module (text+rodata) */
53764- unsigned int init_ro_size, core_ro_size;
53765+ unsigned int init_size_rx, core_size_rx;
53766
53767 /* Arch-specific module values */
53768 struct mod_arch_specific arch;
53769@@ -391,6 +389,10 @@ struct module
53770 #ifdef CONFIG_EVENT_TRACING
53771 struct ftrace_event_call **trace_events;
53772 unsigned int num_trace_events;
53773+ struct file_operations trace_id;
53774+ struct file_operations trace_enable;
53775+ struct file_operations trace_format;
53776+ struct file_operations trace_filter;
53777 #endif
53778 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53779 unsigned long *ftrace_callsites;
53780@@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53781 bool is_module_percpu_address(unsigned long addr);
53782 bool is_module_text_address(unsigned long addr);
53783
53784+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53785+{
53786+
53787+#ifdef CONFIG_PAX_KERNEXEC
53788+ if (ktla_ktva(addr) >= (unsigned long)start &&
53789+ ktla_ktva(addr) < (unsigned long)start + size)
53790+ return 1;
53791+#endif
53792+
53793+ return ((void *)addr >= start && (void *)addr < start + size);
53794+}
53795+
53796+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53797+{
53798+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53799+}
53800+
53801+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53802+{
53803+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53804+}
53805+
53806+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53807+{
53808+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53809+}
53810+
53811+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53812+{
53813+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53814+}
53815+
53816 static inline int within_module_core(unsigned long addr, struct module *mod)
53817 {
53818- return (unsigned long)mod->module_core <= addr &&
53819- addr < (unsigned long)mod->module_core + mod->core_size;
53820+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53821 }
53822
53823 static inline int within_module_init(unsigned long addr, struct module *mod)
53824 {
53825- return (unsigned long)mod->module_init <= addr &&
53826- addr < (unsigned long)mod->module_init + mod->init_size;
53827+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53828 }
53829
53830 /* Search for module by name: must hold module_mutex. */
53831diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53832--- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53833+++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53834@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53835 sections. Returns NULL on failure. */
53836 void *module_alloc(unsigned long size);
53837
53838+#ifdef CONFIG_PAX_KERNEXEC
53839+void *module_alloc_exec(unsigned long size);
53840+#else
53841+#define module_alloc_exec(x) module_alloc(x)
53842+#endif
53843+
53844 /* Free memory returned from module_alloc. */
53845 void module_free(struct module *mod, void *module_region);
53846
53847+#ifdef CONFIG_PAX_KERNEXEC
53848+void module_free_exec(struct module *mod, void *module_region);
53849+#else
53850+#define module_free_exec(x, y) module_free((x), (y))
53851+#endif
53852+
53853 /* Apply the given relocation to the (simplified) ELF. Return -error
53854 or 0. */
53855 int apply_relocate(Elf_Shdr *sechdrs,
53856diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53857--- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53858+++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53859@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53860 * @len is usually just sizeof(string).
53861 */
53862 #define module_param_string(name, string, len, perm) \
53863- static const struct kparam_string __param_string_##name \
53864+ static const struct kparam_string __param_string_##name __used \
53865 = { len, string }; \
53866 __module_param_call(MODULE_PARAM_PREFIX, name, \
53867 &param_ops_string, \
53868@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53869 * module_param_named() for why this might be necessary.
53870 */
53871 #define module_param_array_named(name, array, type, nump, perm) \
53872- static const struct kparam_array __param_arr_##name \
53873+ static const struct kparam_array __param_arr_##name __used \
53874 = { ARRAY_SIZE(array), nump, &param_ops_##type, \
53875 sizeof(array[0]), array }; \
53876 __module_param_call(MODULE_PARAM_PREFIX, name, \
53877diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53878--- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53879+++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53880@@ -51,7 +51,7 @@ struct mutex {
53881 spinlock_t wait_lock;
53882 struct list_head wait_list;
53883 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53884- struct thread_info *owner;
53885+ struct task_struct *owner;
53886 #endif
53887 #ifdef CONFIG_DEBUG_MUTEXES
53888 const char *name;
53889diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53890--- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53891+++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53892@@ -24,7 +24,7 @@ struct nameidata {
53893 unsigned seq;
53894 int last_type;
53895 unsigned depth;
53896- char *saved_names[MAX_NESTED_LINKS + 1];
53897+ const char *saved_names[MAX_NESTED_LINKS + 1];
53898
53899 /* Intent data */
53900 union {
53901@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53902 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53903 extern void unlock_rename(struct dentry *, struct dentry *);
53904
53905-static inline void nd_set_link(struct nameidata *nd, char *path)
53906+static inline void nd_set_link(struct nameidata *nd, const char *path)
53907 {
53908 nd->saved_names[nd->depth] = path;
53909 }
53910
53911-static inline char *nd_get_link(struct nameidata *nd)
53912+static inline const char *nd_get_link(const struct nameidata *nd)
53913 {
53914 return nd->saved_names[nd->depth];
53915 }
53916diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53917--- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53918+++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53919@@ -979,6 +979,7 @@ struct net_device_ops {
53920 int (*ndo_set_features)(struct net_device *dev,
53921 u32 features);
53922 };
53923+typedef struct net_device_ops __no_const net_device_ops_no_const;
53924
53925 /*
53926 * The DEVICE structure.
53927diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53928--- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53929+++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53930@@ -0,0 +1,9 @@
53931+#ifndef _LINUX_NETFILTER_XT_GRADM_H
53932+#define _LINUX_NETFILTER_XT_GRADM_H 1
53933+
53934+struct xt_gradm_mtinfo {
53935+ __u16 flags;
53936+ __u16 invflags;
53937+};
53938+
53939+#endif
53940diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53941--- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53942+++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53943@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53944 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53945 char const * name, ulong * val);
53946
53947-/** Create a file for read-only access to an atomic_t. */
53948+/** Create a file for read-only access to an atomic_unchecked_t. */
53949 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53950- char const * name, atomic_t * val);
53951+ char const * name, atomic_unchecked_t * val);
53952
53953 /** create a directory */
53954 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53955diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53956--- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53957+++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53958@@ -129,7 +129,7 @@ struct parallel_data {
53959 struct padata_instance *pinst;
53960 struct padata_parallel_queue __percpu *pqueue;
53961 struct padata_serial_queue __percpu *squeue;
53962- atomic_t seq_nr;
53963+ atomic_unchecked_t seq_nr;
53964 atomic_t reorder_objects;
53965 atomic_t refcnt;
53966 unsigned int max_seq_nr;
53967diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53968--- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53969+++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53970@@ -759,8 +759,8 @@ struct perf_event {
53971
53972 enum perf_event_active_state state;
53973 unsigned int attach_state;
53974- local64_t count;
53975- atomic64_t child_count;
53976+ local64_t count; /* PaX: fix it one day */
53977+ atomic64_unchecked_t child_count;
53978
53979 /*
53980 * These are the total time in nanoseconds that the event
53981@@ -811,8 +811,8 @@ struct perf_event {
53982 * These accumulate total time (in nanoseconds) that children
53983 * events have been enabled and running, respectively.
53984 */
53985- atomic64_t child_total_time_enabled;
53986- atomic64_t child_total_time_running;
53987+ atomic64_unchecked_t child_total_time_enabled;
53988+ atomic64_unchecked_t child_total_time_running;
53989
53990 /*
53991 * Protect attach/detach and child_list:
53992diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
53993--- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
53994+++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
53995@@ -46,9 +46,9 @@ struct pipe_buffer {
53996 struct pipe_inode_info {
53997 wait_queue_head_t wait;
53998 unsigned int nrbufs, curbuf, buffers;
53999- unsigned int readers;
54000- unsigned int writers;
54001- unsigned int waiting_writers;
54002+ atomic_t readers;
54003+ atomic_t writers;
54004+ atomic_t waiting_writers;
54005 unsigned int r_counter;
54006 unsigned int w_counter;
54007 struct page *tmp_page;
54008diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
54009--- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
54010+++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
54011@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
54012
54013 static inline void pm_runtime_mark_last_busy(struct device *dev)
54014 {
54015- ACCESS_ONCE(dev->power.last_busy) = jiffies;
54016+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
54017 }
54018
54019 #else /* !CONFIG_PM_RUNTIME */
54020diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
54021--- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
54022+++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
54023@@ -19,8 +19,8 @@
54024 * under normal circumstances, used to verify that nobody uses
54025 * non-initialized list entries.
54026 */
54027-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
54028-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
54029+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
54030+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
54031
54032 /********** include/linux/timer.h **********/
54033 /*
54034diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
54035--- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
54036+++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
54037@@ -115,7 +115,7 @@ struct preempt_ops {
54038 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
54039 void (*sched_out)(struct preempt_notifier *notifier,
54040 struct task_struct *next);
54041-};
54042+} __no_const;
54043
54044 /**
54045 * preempt_notifier - key for installing preemption notifiers
54046diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
54047--- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
54048+++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
54049@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
54050 return proc_create_data(name, mode, parent, proc_fops, NULL);
54051 }
54052
54053+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
54054+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
54055+{
54056+#ifdef CONFIG_GRKERNSEC_PROC_USER
54057+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
54058+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54059+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
54060+#else
54061+ return proc_create_data(name, mode, parent, proc_fops, NULL);
54062+#endif
54063+}
54064+
54065+
54066 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
54067 mode_t mode, struct proc_dir_entry *base,
54068 read_proc_t *read_proc, void * data)
54069@@ -258,7 +271,7 @@ union proc_op {
54070 int (*proc_show)(struct seq_file *m,
54071 struct pid_namespace *ns, struct pid *pid,
54072 struct task_struct *task);
54073-};
54074+} __no_const;
54075
54076 struct ctl_table_header;
54077 struct ctl_table;
54078diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
54079--- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
54080+++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
54081@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
54082 extern void exit_ptrace(struct task_struct *tracer);
54083 #define PTRACE_MODE_READ 1
54084 #define PTRACE_MODE_ATTACH 2
54085-/* Returns 0 on success, -errno on denial. */
54086-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
54087 /* Returns true on success, false on denial. */
54088 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
54089+/* Returns true on success, false on denial. */
54090+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
54091
54092 static inline int ptrace_reparented(struct task_struct *child)
54093 {
54094diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
54095--- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
54096+++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
54097@@ -80,12 +80,17 @@ void srandom32(u32 seed);
54098
54099 u32 prandom32(struct rnd_state *);
54100
54101+static inline unsigned long pax_get_random_long(void)
54102+{
54103+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
54104+}
54105+
54106 /*
54107 * Handle minimum values for seeds
54108 */
54109 static inline u32 __seed(u32 x, u32 m)
54110 {
54111- return (x < m) ? x + m : x;
54112+ return (x <= m) ? x + m + 1 : x;
54113 }
54114
54115 /**
54116diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
54117--- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
54118+++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
54119@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
54120 * Architecture-specific implementations of sys_reboot commands.
54121 */
54122
54123-extern void machine_restart(char *cmd);
54124-extern void machine_halt(void);
54125-extern void machine_power_off(void);
54126+extern void machine_restart(char *cmd) __noreturn;
54127+extern void machine_halt(void) __noreturn;
54128+extern void machine_power_off(void) __noreturn;
54129
54130 extern void machine_shutdown(void);
54131 struct pt_regs;
54132@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
54133 */
54134
54135 extern void kernel_restart_prepare(char *cmd);
54136-extern void kernel_restart(char *cmd);
54137-extern void kernel_halt(void);
54138-extern void kernel_power_off(void);
54139+extern void kernel_restart(char *cmd) __noreturn;
54140+extern void kernel_halt(void) __noreturn;
54141+extern void kernel_power_off(void) __noreturn;
54142
54143 extern int C_A_D; /* for sysctl */
54144 void ctrl_alt_del(void);
54145@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
54146 * Emergency restart, callable from an interrupt handler.
54147 */
54148
54149-extern void emergency_restart(void);
54150+extern void emergency_restart(void) __noreturn;
54151 #include <asm/emergency-restart.h>
54152
54153 #endif
54154diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54155--- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54156+++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54157@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54158 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54159
54160 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54161-#define get_generation(s) atomic_read (&fs_generation(s))
54162+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54163 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54164 #define __fs_changed(gen,s) (gen != get_generation (s))
54165 #define fs_changed(gen,s) \
54166diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54167--- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54168+++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54169@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54170 /* Comment? -Hans */
54171 wait_queue_head_t s_wait;
54172 /* To be obsoleted soon by per buffer seals.. -Hans */
54173- atomic_t s_generation_counter; // increased by one every time the
54174+ atomic_unchecked_t s_generation_counter; // increased by one every time the
54175 // tree gets re-balanced
54176 unsigned long s_properties; /* File system properties. Currently holds
54177 on-disk FS format */
54178diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54179--- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54180+++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54181@@ -159,7 +159,7 @@ struct rchan_callbacks
54182 * The callback should return 0 if successful, negative if not.
54183 */
54184 int (*remove_buf_file)(struct dentry *dentry);
54185-};
54186+} __no_const;
54187
54188 /*
54189 * CONFIG_RELAY kernel API, kernel/relay.c
54190diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54191--- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54192+++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54193@@ -147,6 +147,7 @@ struct rfkill_ops {
54194 void (*query)(struct rfkill *rfkill, void *data);
54195 int (*set_block)(void *data, bool blocked);
54196 };
54197+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54198
54199 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54200 /**
54201diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54202--- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54203+++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54204@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54205 void anon_vma_init(void); /* create anon_vma_cachep */
54206 int anon_vma_prepare(struct vm_area_struct *);
54207 void unlink_anon_vmas(struct vm_area_struct *);
54208-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54209-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54210+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54211+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54212 void __anon_vma_link(struct vm_area_struct *);
54213
54214 static inline void anon_vma_merge(struct vm_area_struct *vma,
54215diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54216--- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54217+++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54218@@ -100,6 +100,7 @@ struct bio_list;
54219 struct fs_struct;
54220 struct perf_event_context;
54221 struct blk_plug;
54222+struct linux_binprm;
54223
54224 /*
54225 * List of flags we want to share for kernel threads,
54226@@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54227 extern signed long schedule_timeout_killable(signed long timeout);
54228 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54229 asmlinkage void schedule(void);
54230-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54231+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54232
54233 struct nsproxy;
54234 struct user_namespace;
54235@@ -381,10 +382,13 @@ struct user_namespace;
54236 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54237
54238 extern int sysctl_max_map_count;
54239+extern unsigned long sysctl_heap_stack_gap;
54240
54241 #include <linux/aio.h>
54242
54243 #ifdef CONFIG_MMU
54244+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54245+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54246 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54247 extern unsigned long
54248 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54249@@ -629,6 +633,17 @@ struct signal_struct {
54250 #ifdef CONFIG_TASKSTATS
54251 struct taskstats *stats;
54252 #endif
54253+
54254+#ifdef CONFIG_GRKERNSEC
54255+ u32 curr_ip;
54256+ u32 saved_ip;
54257+ u32 gr_saddr;
54258+ u32 gr_daddr;
54259+ u16 gr_sport;
54260+ u16 gr_dport;
54261+ u8 used_accept:1;
54262+#endif
54263+
54264 #ifdef CONFIG_AUDIT
54265 unsigned audit_tty;
54266 struct tty_audit_buf *tty_audit_buf;
54267@@ -701,6 +716,11 @@ struct user_struct {
54268 struct key *session_keyring; /* UID's default session keyring */
54269 #endif
54270
54271+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54272+ unsigned int banned;
54273+ unsigned long ban_expires;
54274+#endif
54275+
54276 /* Hash table maintenance information */
54277 struct hlist_node uidhash_node;
54278 uid_t uid;
54279@@ -1310,8 +1330,8 @@ struct task_struct {
54280 struct list_head thread_group;
54281
54282 struct completion *vfork_done; /* for vfork() */
54283- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54284- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54285+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54286+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54287
54288 cputime_t utime, stime, utimescaled, stimescaled;
54289 cputime_t gtime;
54290@@ -1327,13 +1347,6 @@ struct task_struct {
54291 struct task_cputime cputime_expires;
54292 struct list_head cpu_timers[3];
54293
54294-/* process credentials */
54295- const struct cred __rcu *real_cred; /* objective and real subjective task
54296- * credentials (COW) */
54297- const struct cred __rcu *cred; /* effective (overridable) subjective task
54298- * credentials (COW) */
54299- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54300-
54301 char comm[TASK_COMM_LEN]; /* executable name excluding path
54302 - access with [gs]et_task_comm (which lock
54303 it with task_lock())
54304@@ -1350,8 +1363,16 @@ struct task_struct {
54305 #endif
54306 /* CPU-specific state of this task */
54307 struct thread_struct thread;
54308+/* thread_info moved to task_struct */
54309+#ifdef CONFIG_X86
54310+ struct thread_info tinfo;
54311+#endif
54312 /* filesystem information */
54313 struct fs_struct *fs;
54314+
54315+ const struct cred __rcu *cred; /* effective (overridable) subjective task
54316+ * credentials (COW) */
54317+
54318 /* open file information */
54319 struct files_struct *files;
54320 /* namespaces */
54321@@ -1398,6 +1419,11 @@ struct task_struct {
54322 struct rt_mutex_waiter *pi_blocked_on;
54323 #endif
54324
54325+/* process credentials */
54326+ const struct cred __rcu *real_cred; /* objective and real subjective task
54327+ * credentials (COW) */
54328+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54329+
54330 #ifdef CONFIG_DEBUG_MUTEXES
54331 /* mutex deadlock detection */
54332 struct mutex_waiter *blocked_on;
54333@@ -1508,6 +1534,21 @@ struct task_struct {
54334 unsigned long default_timer_slack_ns;
54335
54336 struct list_head *scm_work_list;
54337+
54338+#ifdef CONFIG_GRKERNSEC
54339+ /* grsecurity */
54340+ struct dentry *gr_chroot_dentry;
54341+ struct acl_subject_label *acl;
54342+ struct acl_role_label *role;
54343+ struct file *exec_file;
54344+ u16 acl_role_id;
54345+ /* is this the task that authenticated to the special role */
54346+ u8 acl_sp_role;
54347+ u8 is_writable;
54348+ u8 brute;
54349+ u8 gr_is_chrooted;
54350+#endif
54351+
54352 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54353 /* Index of current stored address in ret_stack */
54354 int curr_ret_stack;
54355@@ -1542,6 +1583,57 @@ struct task_struct {
54356 #endif
54357 };
54358
54359+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54360+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54361+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54362+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54363+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54364+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54365+
54366+#ifdef CONFIG_PAX_SOFTMODE
54367+extern int pax_softmode;
54368+#endif
54369+
54370+extern int pax_check_flags(unsigned long *);
54371+
54372+/* if tsk != current then task_lock must be held on it */
54373+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54374+static inline unsigned long pax_get_flags(struct task_struct *tsk)
54375+{
54376+ if (likely(tsk->mm))
54377+ return tsk->mm->pax_flags;
54378+ else
54379+ return 0UL;
54380+}
54381+
54382+/* if tsk != current then task_lock must be held on it */
54383+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54384+{
54385+ if (likely(tsk->mm)) {
54386+ tsk->mm->pax_flags = flags;
54387+ return 0;
54388+ }
54389+ return -EINVAL;
54390+}
54391+#endif
54392+
54393+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54394+extern void pax_set_initial_flags(struct linux_binprm *bprm);
54395+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54396+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54397+#endif
54398+
54399+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54400+extern void pax_report_insns(void *pc, void *sp);
54401+extern void pax_report_refcount_overflow(struct pt_regs *regs);
54402+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54403+
54404+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54405+extern void pax_track_stack(void);
54406+#else
54407+static inline void pax_track_stack(void) {}
54408+#endif
54409+
54410 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54411 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54412
54413@@ -2009,7 +2101,9 @@ void yield(void);
54414 extern struct exec_domain default_exec_domain;
54415
54416 union thread_union {
54417+#ifndef CONFIG_X86
54418 struct thread_info thread_info;
54419+#endif
54420 unsigned long stack[THREAD_SIZE/sizeof(long)];
54421 };
54422
54423@@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54424 */
54425
54426 extern struct task_struct *find_task_by_vpid(pid_t nr);
54427+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54428 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54429 struct pid_namespace *ns);
54430
54431@@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54432 extern void exit_itimers(struct signal_struct *);
54433 extern void flush_itimer_signals(void);
54434
54435-extern NORET_TYPE void do_group_exit(int);
54436+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54437
54438 extern void daemonize(const char *, ...);
54439 extern int allow_signal(int);
54440@@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54441
54442 #endif
54443
54444-static inline int object_is_on_stack(void *obj)
54445+static inline int object_starts_on_stack(void *obj)
54446 {
54447- void *stack = task_stack_page(current);
54448+ const void *stack = task_stack_page(current);
54449
54450 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54451 }
54452
54453+#ifdef CONFIG_PAX_USERCOPY
54454+extern int object_is_on_stack(const void *obj, unsigned long len);
54455+#endif
54456+
54457 extern void thread_info_cache_init(void);
54458
54459 #ifdef CONFIG_DEBUG_STACK_USAGE
54460diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54461--- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54462+++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54463@@ -43,7 +43,8 @@ struct screen_info {
54464 __u16 pages; /* 0x32 */
54465 __u16 vesa_attributes; /* 0x34 */
54466 __u32 capabilities; /* 0x36 */
54467- __u8 _reserved[6]; /* 0x3a */
54468+ __u16 vesapm_size; /* 0x3a */
54469+ __u8 _reserved[4]; /* 0x3c */
54470 } __attribute__((packed));
54471
54472 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54473diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54474--- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54475+++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54476@@ -36,6 +36,7 @@
54477 #include <linux/key.h>
54478 #include <linux/xfrm.h>
54479 #include <linux/slab.h>
54480+#include <linux/grsecurity.h>
54481 #include <net/flow.h>
54482
54483 /* Maximum number of letters for an LSM name string */
54484diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54485--- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54486+++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54487@@ -32,6 +32,7 @@ struct seq_operations {
54488 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54489 int (*show) (struct seq_file *m, void *v);
54490 };
54491+typedef struct seq_operations __no_const seq_operations_no_const;
54492
54493 #define SEQ_SKIP 1
54494
54495diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54496--- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54497+++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54498@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54499 pid_t shm_cprid;
54500 pid_t shm_lprid;
54501 struct user_struct *mlock_user;
54502+#ifdef CONFIG_GRKERNSEC
54503+ time_t shm_createtime;
54504+ pid_t shm_lapid;
54505+#endif
54506 };
54507
54508 /* shm_mode upper byte flags */
54509diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54510--- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54511+++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54512@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54513 */
54514 static inline int skb_queue_empty(const struct sk_buff_head *list)
54515 {
54516- return list->next == (struct sk_buff *)list;
54517+ return list->next == (const struct sk_buff *)list;
54518 }
54519
54520 /**
54521@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54522 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54523 const struct sk_buff *skb)
54524 {
54525- return skb->next == (struct sk_buff *)list;
54526+ return skb->next == (const struct sk_buff *)list;
54527 }
54528
54529 /**
54530@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54531 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54532 const struct sk_buff *skb)
54533 {
54534- return skb->prev == (struct sk_buff *)list;
54535+ return skb->prev == (const struct sk_buff *)list;
54536 }
54537
54538 /**
54539@@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54540 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54541 */
54542 #ifndef NET_SKB_PAD
54543-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54544+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54545 #endif
54546
54547 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54548diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54549--- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54550+++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54551@@ -96,10 +96,10 @@ struct kmem_cache {
54552 unsigned long node_allocs;
54553 unsigned long node_frees;
54554 unsigned long node_overflow;
54555- atomic_t allochit;
54556- atomic_t allocmiss;
54557- atomic_t freehit;
54558- atomic_t freemiss;
54559+ atomic_unchecked_t allochit;
54560+ atomic_unchecked_t allocmiss;
54561+ atomic_unchecked_t freehit;
54562+ atomic_unchecked_t freemiss;
54563
54564 /*
54565 * If debugging is enabled, then the allocator can add additional
54566diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54567--- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54568+++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54569@@ -11,12 +11,20 @@
54570
54571 #include <linux/gfp.h>
54572 #include <linux/types.h>
54573+#include <linux/err.h>
54574
54575 /*
54576 * Flags to pass to kmem_cache_create().
54577 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54578 */
54579 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54580+
54581+#ifdef CONFIG_PAX_USERCOPY
54582+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54583+#else
54584+#define SLAB_USERCOPY 0x00000000UL
54585+#endif
54586+
54587 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54588 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54589 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54590@@ -87,10 +95,13 @@
54591 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54592 * Both make kfree a no-op.
54593 */
54594-#define ZERO_SIZE_PTR ((void *)16)
54595+#define ZERO_SIZE_PTR \
54596+({ \
54597+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54598+ (void *)(-MAX_ERRNO-1L); \
54599+})
54600
54601-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54602- (unsigned long)ZERO_SIZE_PTR)
54603+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54604
54605 /*
54606 * struct kmem_cache related prototypes
54607@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54608 void kfree(const void *);
54609 void kzfree(const void *);
54610 size_t ksize(const void *);
54611+void check_object_size(const void *ptr, unsigned long n, bool to);
54612
54613 /*
54614 * Allocator specific definitions. These are mainly used to establish optimized
54615@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54616
54617 void __init kmem_cache_init_late(void);
54618
54619+#define kmalloc(x, y) \
54620+({ \
54621+ void *___retval; \
54622+ intoverflow_t ___x = (intoverflow_t)x; \
54623+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54624+ ___retval = NULL; \
54625+ else \
54626+ ___retval = kmalloc((size_t)___x, (y)); \
54627+ ___retval; \
54628+})
54629+
54630+#define kmalloc_node(x, y, z) \
54631+({ \
54632+ void *___retval; \
54633+ intoverflow_t ___x = (intoverflow_t)x; \
54634+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54635+ ___retval = NULL; \
54636+ else \
54637+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
54638+ ___retval; \
54639+})
54640+
54641+#define kzalloc(x, y) \
54642+({ \
54643+ void *___retval; \
54644+ intoverflow_t ___x = (intoverflow_t)x; \
54645+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54646+ ___retval = NULL; \
54647+ else \
54648+ ___retval = kzalloc((size_t)___x, (y)); \
54649+ ___retval; \
54650+})
54651+
54652+#define __krealloc(x, y, z) \
54653+({ \
54654+ void *___retval; \
54655+ intoverflow_t ___y = (intoverflow_t)y; \
54656+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54657+ ___retval = NULL; \
54658+ else \
54659+ ___retval = __krealloc((x), (size_t)___y, (z)); \
54660+ ___retval; \
54661+})
54662+
54663+#define krealloc(x, y, z) \
54664+({ \
54665+ void *___retval; \
54666+ intoverflow_t ___y = (intoverflow_t)y; \
54667+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54668+ ___retval = NULL; \
54669+ else \
54670+ ___retval = krealloc((x), (size_t)___y, (z)); \
54671+ ___retval; \
54672+})
54673+
54674 #endif /* _LINUX_SLAB_H */
54675diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54676--- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54677+++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54678@@ -84,7 +84,7 @@ struct kmem_cache {
54679 struct kmem_cache_order_objects max;
54680 struct kmem_cache_order_objects min;
54681 gfp_t allocflags; /* gfp flags to use on each alloc */
54682- int refcount; /* Refcount for slab cache destroy */
54683+ atomic_t refcount; /* Refcount for slab cache destroy */
54684 void (*ctor)(void *);
54685 int inuse; /* Offset to metadata */
54686 int align; /* Alignment */
54687@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54688 }
54689
54690 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54691-void *__kmalloc(size_t size, gfp_t flags);
54692+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54693
54694 static __always_inline void *
54695 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54696diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54697--- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54698+++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54699@@ -61,7 +61,7 @@ struct sonet_stats {
54700 #include <asm/atomic.h>
54701
54702 struct k_sonet_stats {
54703-#define __HANDLE_ITEM(i) atomic_t i
54704+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54705 __SONET_ITEMS
54706 #undef __HANDLE_ITEM
54707 };
54708diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54709--- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54710+++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54711@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54712 {
54713 switch (sap->sa_family) {
54714 case AF_INET:
54715- return ntohs(((struct sockaddr_in *)sap)->sin_port);
54716+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54717 case AF_INET6:
54718- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54719+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54720 }
54721 return 0;
54722 }
54723@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54724 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54725 const struct sockaddr *src)
54726 {
54727- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54728+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54729 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54730
54731 dsin->sin_family = ssin->sin_family;
54732@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54733 if (sa->sa_family != AF_INET6)
54734 return 0;
54735
54736- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54737+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54738 }
54739
54740 #endif /* __KERNEL__ */
54741diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54742--- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54743+++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54744@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54745 extern unsigned int svcrdma_max_requests;
54746 extern unsigned int svcrdma_max_req_size;
54747
54748-extern atomic_t rdma_stat_recv;
54749-extern atomic_t rdma_stat_read;
54750-extern atomic_t rdma_stat_write;
54751-extern atomic_t rdma_stat_sq_starve;
54752-extern atomic_t rdma_stat_rq_starve;
54753-extern atomic_t rdma_stat_rq_poll;
54754-extern atomic_t rdma_stat_rq_prod;
54755-extern atomic_t rdma_stat_sq_poll;
54756-extern atomic_t rdma_stat_sq_prod;
54757+extern atomic_unchecked_t rdma_stat_recv;
54758+extern atomic_unchecked_t rdma_stat_read;
54759+extern atomic_unchecked_t rdma_stat_write;
54760+extern atomic_unchecked_t rdma_stat_sq_starve;
54761+extern atomic_unchecked_t rdma_stat_rq_starve;
54762+extern atomic_unchecked_t rdma_stat_rq_poll;
54763+extern atomic_unchecked_t rdma_stat_rq_prod;
54764+extern atomic_unchecked_t rdma_stat_sq_poll;
54765+extern atomic_unchecked_t rdma_stat_sq_prod;
54766
54767 #define RPCRDMA_VERSION 1
54768
54769diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54770--- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54771+++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54772@@ -155,7 +155,11 @@ enum
54773 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54774 };
54775
54776-
54777+#ifdef CONFIG_PAX_SOFTMODE
54778+enum {
54779+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54780+};
54781+#endif
54782
54783 /* CTL_VM names: */
54784 enum
54785@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54786
54787 extern int proc_dostring(struct ctl_table *, int,
54788 void __user *, size_t *, loff_t *);
54789+extern int proc_dostring_modpriv(struct ctl_table *, int,
54790+ void __user *, size_t *, loff_t *);
54791 extern int proc_dointvec(struct ctl_table *, int,
54792 void __user *, size_t *, loff_t *);
54793 extern int proc_dointvec_minmax(struct ctl_table *, int,
54794diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54795--- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54796+++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54797@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54798
54799 struct module *owner;
54800
54801- int refcount;
54802+ atomic_t refcount;
54803 };
54804
54805 struct tty_ldisc {
54806diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54807--- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54808+++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54809@@ -213,10 +213,26 @@ typedef struct {
54810 int counter;
54811 } atomic_t;
54812
54813+#ifdef CONFIG_PAX_REFCOUNT
54814+typedef struct {
54815+ int counter;
54816+} atomic_unchecked_t;
54817+#else
54818+typedef atomic_t atomic_unchecked_t;
54819+#endif
54820+
54821 #ifdef CONFIG_64BIT
54822 typedef struct {
54823 long counter;
54824 } atomic64_t;
54825+
54826+#ifdef CONFIG_PAX_REFCOUNT
54827+typedef struct {
54828+ long counter;
54829+} atomic64_unchecked_t;
54830+#else
54831+typedef atomic64_t atomic64_unchecked_t;
54832+#endif
54833 #endif
54834
54835 struct list_head {
54836diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54837--- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54838+++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54839@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54840 long ret; \
54841 mm_segment_t old_fs = get_fs(); \
54842 \
54843- set_fs(KERNEL_DS); \
54844 pagefault_disable(); \
54845+ set_fs(KERNEL_DS); \
54846 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54847- pagefault_enable(); \
54848 set_fs(old_fs); \
54849+ pagefault_enable(); \
54850 ret; \
54851 })
54852
54853@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54854 * Safely read from address @src to the buffer at @dst. If a kernel fault
54855 * happens, handle that and return -EFAULT.
54856 */
54857-extern long probe_kernel_read(void *dst, void *src, size_t size);
54858-extern long __probe_kernel_read(void *dst, void *src, size_t size);
54859+extern long probe_kernel_read(void *dst, const void *src, size_t size);
54860+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54861
54862 /*
54863 * probe_kernel_write(): safely attempt to write to a location
54864@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54865 * Safely write to address @dst from the buffer at @src. If a kernel fault
54866 * happens, handle that and return -EFAULT.
54867 */
54868-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54869-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54870+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54871+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54872
54873 #endif /* __LINUX_UACCESS_H__ */
54874diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54875--- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54876+++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54877@@ -6,32 +6,32 @@
54878
54879 static inline u16 get_unaligned_le16(const void *p)
54880 {
54881- return le16_to_cpup((__le16 *)p);
54882+ return le16_to_cpup((const __le16 *)p);
54883 }
54884
54885 static inline u32 get_unaligned_le32(const void *p)
54886 {
54887- return le32_to_cpup((__le32 *)p);
54888+ return le32_to_cpup((const __le32 *)p);
54889 }
54890
54891 static inline u64 get_unaligned_le64(const void *p)
54892 {
54893- return le64_to_cpup((__le64 *)p);
54894+ return le64_to_cpup((const __le64 *)p);
54895 }
54896
54897 static inline u16 get_unaligned_be16(const void *p)
54898 {
54899- return be16_to_cpup((__be16 *)p);
54900+ return be16_to_cpup((const __be16 *)p);
54901 }
54902
54903 static inline u32 get_unaligned_be32(const void *p)
54904 {
54905- return be32_to_cpup((__be32 *)p);
54906+ return be32_to_cpup((const __be32 *)p);
54907 }
54908
54909 static inline u64 get_unaligned_be64(const void *p)
54910 {
54911- return be64_to_cpup((__be64 *)p);
54912+ return be64_to_cpup((const __be64 *)p);
54913 }
54914
54915 static inline void put_unaligned_le16(u16 val, void *p)
54916diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54917--- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54918+++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54919@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54920 #define VM_MAP 0x00000004 /* vmap()ed pages */
54921 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54922 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54923+
54924+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54925+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54926+#endif
54927+
54928 /* bits [20..32] reserved for arch specific ioremap internals */
54929
54930 /*
54931@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54932 # endif
54933 #endif
54934
54935+#define vmalloc(x) \
54936+({ \
54937+ void *___retval; \
54938+ intoverflow_t ___x = (intoverflow_t)x; \
54939+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54940+ ___retval = NULL; \
54941+ else \
54942+ ___retval = vmalloc((unsigned long)___x); \
54943+ ___retval; \
54944+})
54945+
54946+#define vzalloc(x) \
54947+({ \
54948+ void *___retval; \
54949+ intoverflow_t ___x = (intoverflow_t)x; \
54950+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54951+ ___retval = NULL; \
54952+ else \
54953+ ___retval = vzalloc((unsigned long)___x); \
54954+ ___retval; \
54955+})
54956+
54957+#define __vmalloc(x, y, z) \
54958+({ \
54959+ void *___retval; \
54960+ intoverflow_t ___x = (intoverflow_t)x; \
54961+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54962+ ___retval = NULL; \
54963+ else \
54964+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54965+ ___retval; \
54966+})
54967+
54968+#define vmalloc_user(x) \
54969+({ \
54970+ void *___retval; \
54971+ intoverflow_t ___x = (intoverflow_t)x; \
54972+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54973+ ___retval = NULL; \
54974+ else \
54975+ ___retval = vmalloc_user((unsigned long)___x); \
54976+ ___retval; \
54977+})
54978+
54979+#define vmalloc_exec(x) \
54980+({ \
54981+ void *___retval; \
54982+ intoverflow_t ___x = (intoverflow_t)x; \
54983+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54984+ ___retval = NULL; \
54985+ else \
54986+ ___retval = vmalloc_exec((unsigned long)___x); \
54987+ ___retval; \
54988+})
54989+
54990+#define vmalloc_node(x, y) \
54991+({ \
54992+ void *___retval; \
54993+ intoverflow_t ___x = (intoverflow_t)x; \
54994+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54995+ ___retval = NULL; \
54996+ else \
54997+ ___retval = vmalloc_node((unsigned long)___x, (y));\
54998+ ___retval; \
54999+})
55000+
55001+#define vzalloc_node(x, y) \
55002+({ \
55003+ void *___retval; \
55004+ intoverflow_t ___x = (intoverflow_t)x; \
55005+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
55006+ ___retval = NULL; \
55007+ else \
55008+ ___retval = vzalloc_node((unsigned long)___x, (y));\
55009+ ___retval; \
55010+})
55011+
55012+#define vmalloc_32(x) \
55013+({ \
55014+ void *___retval; \
55015+ intoverflow_t ___x = (intoverflow_t)x; \
55016+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
55017+ ___retval = NULL; \
55018+ else \
55019+ ___retval = vmalloc_32((unsigned long)___x); \
55020+ ___retval; \
55021+})
55022+
55023+#define vmalloc_32_user(x) \
55024+({ \
55025+void *___retval; \
55026+ intoverflow_t ___x = (intoverflow_t)x; \
55027+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
55028+ ___retval = NULL; \
55029+ else \
55030+ ___retval = vmalloc_32_user((unsigned long)___x);\
55031+ ___retval; \
55032+})
55033+
55034 #endif /* _LINUX_VMALLOC_H */
55035diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
55036--- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
55037+++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
55038@@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
55039 /*
55040 * Zone based page accounting with per cpu differentials.
55041 */
55042-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55043+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55044
55045 static inline void zone_page_state_add(long x, struct zone *zone,
55046 enum zone_stat_item item)
55047 {
55048- atomic_long_add(x, &zone->vm_stat[item]);
55049- atomic_long_add(x, &vm_stat[item]);
55050+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
55051+ atomic_long_add_unchecked(x, &vm_stat[item]);
55052 }
55053
55054 static inline unsigned long global_page_state(enum zone_stat_item item)
55055 {
55056- long x = atomic_long_read(&vm_stat[item]);
55057+ long x = atomic_long_read_unchecked(&vm_stat[item]);
55058 #ifdef CONFIG_SMP
55059 if (x < 0)
55060 x = 0;
55061@@ -169,7 +169,7 @@ static inline unsigned long global_page_
55062 static inline unsigned long zone_page_state(struct zone *zone,
55063 enum zone_stat_item item)
55064 {
55065- long x = atomic_long_read(&zone->vm_stat[item]);
55066+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55067 #ifdef CONFIG_SMP
55068 if (x < 0)
55069 x = 0;
55070@@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
55071 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
55072 enum zone_stat_item item)
55073 {
55074- long x = atomic_long_read(&zone->vm_stat[item]);
55075+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55076
55077 #ifdef CONFIG_SMP
55078 int cpu;
55079@@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
55080
55081 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
55082 {
55083- atomic_long_inc(&zone->vm_stat[item]);
55084- atomic_long_inc(&vm_stat[item]);
55085+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
55086+ atomic_long_inc_unchecked(&vm_stat[item]);
55087 }
55088
55089 static inline void __inc_zone_page_state(struct page *page,
55090@@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
55091
55092 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
55093 {
55094- atomic_long_dec(&zone->vm_stat[item]);
55095- atomic_long_dec(&vm_stat[item]);
55096+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
55097+ atomic_long_dec_unchecked(&vm_stat[item]);
55098 }
55099
55100 static inline void __dec_zone_page_state(struct page *page,
55101diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
55102--- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
55103+++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
55104@@ -163,7 +163,7 @@ struct saa7146_ext_vv
55105 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
55106
55107 /* the extension can override this */
55108- struct v4l2_ioctl_ops ops;
55109+ v4l2_ioctl_ops_no_const ops;
55110 /* pointer to the saa7146 core ops */
55111 const struct v4l2_ioctl_ops *core_ops;
55112
55113diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
55114--- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
55115+++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
55116@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
55117
55118
55119 struct v4l2_file_operations {
55120- struct module *owner;
55121+ struct module * const owner;
55122 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
55123 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
55124 unsigned int (*poll) (struct file *, struct poll_table_struct *);
55125diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
55126--- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
55127+++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
55128@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
55129 this function returns 0. If the name ends with a digit (e.g. cx18),
55130 then the name will be set to cx18-0 since cx180 looks really odd. */
55131 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
55132- atomic_t *instance);
55133+ atomic_unchecked_t *instance);
55134
55135 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
55136 Since the parent disappears this ensures that v4l2_dev doesn't have an
55137diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
55138--- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
55139+++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
55140@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
55141 long (*vidioc_default) (struct file *file, void *fh,
55142 bool valid_prio, int cmd, void *arg);
55143 };
55144+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
55145
55146
55147 /* v4l debugging and diagnostics */
55148diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55149--- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55150+++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55151@@ -52,7 +52,7 @@ struct cfctrl_rsp {
55152 void (*radioset_rsp)(void);
55153 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55154 struct cflayer *client_layer);
55155-};
55156+} __no_const;
55157
55158 /* Link Setup Parameters for CAIF-Links. */
55159 struct cfctrl_link_param {
55160@@ -101,8 +101,8 @@ struct cfctrl_request_info {
55161 struct cfctrl {
55162 struct cfsrvl serv;
55163 struct cfctrl_rsp res;
55164- atomic_t req_seq_no;
55165- atomic_t rsp_seq_no;
55166+ atomic_unchecked_t req_seq_no;
55167+ atomic_unchecked_t rsp_seq_no;
55168 struct list_head list;
55169 /* Protects from simultaneous access to first_req list */
55170 spinlock_t info_list_lock;
55171diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55172--- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55173+++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55174@@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55175 u8 dir, flow_resolve_t resolver, void *ctx);
55176
55177 extern void flow_cache_flush(void);
55178-extern atomic_t flow_cache_genid;
55179+extern atomic_unchecked_t flow_cache_genid;
55180
55181 #endif
55182diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55183--- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55184+++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55185@@ -43,8 +43,8 @@ struct inet_peer {
55186 */
55187 union {
55188 struct {
55189- atomic_t rid; /* Frag reception counter */
55190- atomic_t ip_id_count; /* IP ID for the next packet */
55191+ atomic_unchecked_t rid; /* Frag reception counter */
55192+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55193 __u32 tcp_ts;
55194 __u32 tcp_ts_stamp;
55195 u32 metrics[RTAX_MAX];
55196@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55197 {
55198 more++;
55199 inet_peer_refcheck(p);
55200- return atomic_add_return(more, &p->ip_id_count) - more;
55201+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55202 }
55203
55204 #endif /* _NET_INETPEER_H */
55205diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55206--- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55207+++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55208@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55209
55210 #define FIB_RES_SADDR(net, res) \
55211 ((FIB_RES_NH(res).nh_saddr_genid == \
55212- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55213+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55214 FIB_RES_NH(res).nh_saddr : \
55215 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55216 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55217diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55218--- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55219+++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55220@@ -512,7 +512,7 @@ struct ip_vs_conn {
55221 struct ip_vs_conn *control; /* Master control connection */
55222 atomic_t n_control; /* Number of controlled ones */
55223 struct ip_vs_dest *dest; /* real server */
55224- atomic_t in_pkts; /* incoming packet counter */
55225+ atomic_unchecked_t in_pkts; /* incoming packet counter */
55226
55227 /* packet transmitter for different forwarding methods. If it
55228 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55229@@ -650,7 +650,7 @@ struct ip_vs_dest {
55230 __be16 port; /* port number of the server */
55231 union nf_inet_addr addr; /* IP address of the server */
55232 volatile unsigned flags; /* dest status flags */
55233- atomic_t conn_flags; /* flags to copy to conn */
55234+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
55235 atomic_t weight; /* server weight */
55236
55237 atomic_t refcnt; /* reference counter */
55238diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55239--- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55240+++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55241@@ -51,7 +51,7 @@ typedef struct {
55242 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55243 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55244 struct ircomm_info *);
55245-} call_t;
55246+} __no_const call_t;
55247
55248 struct ircomm_cb {
55249 irda_queue_t queue;
55250diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55251--- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55252+++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55253@@ -35,6 +35,7 @@
55254 #include <linux/termios.h>
55255 #include <linux/timer.h>
55256 #include <linux/tty.h> /* struct tty_struct */
55257+#include <asm/local.h>
55258
55259 #include <net/irda/irias_object.h>
55260 #include <net/irda/ircomm_core.h>
55261@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55262 unsigned short close_delay;
55263 unsigned short closing_wait; /* time to wait before closing */
55264
55265- int open_count;
55266- int blocked_open; /* # of blocked opens */
55267+ local_t open_count;
55268+ local_t blocked_open; /* # of blocked opens */
55269
55270 /* Protect concurent access to :
55271 * o self->open_count
55272diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55273--- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55274+++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55275@@ -87,7 +87,7 @@ struct iucv_sock {
55276 struct iucv_sock_list {
55277 struct hlist_head head;
55278 rwlock_t lock;
55279- atomic_t autobind_name;
55280+ atomic_unchecked_t autobind_name;
55281 };
55282
55283 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55284diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55285--- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55286+++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55287@@ -95,7 +95,7 @@ struct lapb_cb {
55288 struct sk_buff_head write_queue;
55289 struct sk_buff_head ack_queue;
55290 unsigned char window;
55291- struct lapb_register_struct callbacks;
55292+ struct lapb_register_struct *callbacks;
55293
55294 /* FRMR control information */
55295 struct lapb_frame frmr_data;
55296diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55297--- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55298+++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55299@@ -117,7 +117,7 @@ struct neighbour {
55300 };
55301
55302 struct neigh_ops {
55303- int family;
55304+ const int family;
55305 void (*solicit)(struct neighbour *, struct sk_buff*);
55306 void (*error_report)(struct neighbour *, struct sk_buff*);
55307 int (*output)(struct sk_buff*);
55308diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55309--- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55310+++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55311@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55312 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55313 {
55314 if (mark)
55315- skb_trim(skb, (unsigned char *) mark - skb->data);
55316+ skb_trim(skb, (const unsigned char *) mark - skb->data);
55317 }
55318
55319 /**
55320diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55321--- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55322+++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55323@@ -54,8 +54,8 @@ struct netns_ipv4 {
55324 int sysctl_rt_cache_rebuild_count;
55325 int current_rt_cache_rebuild_count;
55326
55327- atomic_t rt_genid;
55328- atomic_t dev_addr_genid;
55329+ atomic_unchecked_t rt_genid;
55330+ atomic_unchecked_t dev_addr_genid;
55331
55332 #ifdef CONFIG_IP_MROUTE
55333 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55334diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55335--- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55336+++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55337@@ -316,9 +316,9 @@ do { \
55338
55339 #else /* SCTP_DEBUG */
55340
55341-#define SCTP_DEBUG_PRINTK(whatever...)
55342-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55343-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55344+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55345+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55346+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55347 #define SCTP_ENABLE_DEBUG
55348 #define SCTP_DISABLE_DEBUG
55349 #define SCTP_ASSERT(expr, str, func)
55350diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55351--- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55352+++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55353@@ -277,7 +277,7 @@ struct sock {
55354 #ifdef CONFIG_RPS
55355 __u32 sk_rxhash;
55356 #endif
55357- atomic_t sk_drops;
55358+ atomic_unchecked_t sk_drops;
55359 int sk_rcvbuf;
55360
55361 struct sk_filter __rcu *sk_filter;
55362diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55363--- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55364+++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55365@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55366 struct tcp_seq_afinfo {
55367 char *name;
55368 sa_family_t family;
55369- struct file_operations seq_fops;
55370- struct seq_operations seq_ops;
55371+ file_operations_no_const seq_fops;
55372+ seq_operations_no_const seq_ops;
55373 };
55374
55375 struct tcp_iter_state {
55376diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55377--- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55378+++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55379@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55380 char *name;
55381 sa_family_t family;
55382 struct udp_table *udp_table;
55383- struct file_operations seq_fops;
55384- struct seq_operations seq_ops;
55385+ file_operations_no_const seq_fops;
55386+ seq_operations_no_const seq_ops;
55387 };
55388
55389 struct udp_iter_state {
55390diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55391--- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55392+++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55393@@ -505,7 +505,7 @@ struct xfrm_policy {
55394 struct timer_list timer;
55395
55396 struct flow_cache_object flo;
55397- atomic_t genid;
55398+ atomic_unchecked_t genid;
55399 u32 priority;
55400 u32 index;
55401 struct xfrm_mark mark;
55402diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55403--- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55404+++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55405@@ -129,7 +129,7 @@ struct iw_cm_verbs {
55406 int backlog);
55407
55408 int (*destroy_listen)(struct iw_cm_id *cm_id);
55409-};
55410+} __no_const;
55411
55412 /**
55413 * iw_create_cm_id - Create an IW CM identifier.
55414diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55415--- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55416+++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55417@@ -750,6 +750,7 @@ struct libfc_function_template {
55418 */
55419 void (*disc_stop_final) (struct fc_lport *);
55420 };
55421+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55422
55423 /**
55424 * struct fc_disc - Discovery context
55425@@ -853,7 +854,7 @@ struct fc_lport {
55426 struct fc_vport *vport;
55427
55428 /* Operational Information */
55429- struct libfc_function_template tt;
55430+ libfc_function_template_no_const tt;
55431 u8 link_up;
55432 u8 qfull;
55433 enum fc_lport_state state;
55434diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55435--- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55436+++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55437@@ -161,9 +161,9 @@ struct scsi_device {
55438 unsigned int max_device_blocked; /* what device_blocked counts down from */
55439 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55440
55441- atomic_t iorequest_cnt;
55442- atomic_t iodone_cnt;
55443- atomic_t ioerr_cnt;
55444+ atomic_unchecked_t iorequest_cnt;
55445+ atomic_unchecked_t iodone_cnt;
55446+ atomic_unchecked_t ioerr_cnt;
55447
55448 struct device sdev_gendev,
55449 sdev_dev;
55450diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55451--- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55452+++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55453@@ -666,9 +666,9 @@ struct fc_function_template {
55454 int (*bsg_timeout)(struct fc_bsg_job *);
55455
55456 /* allocation lengths for host-specific data */
55457- u32 dd_fcrport_size;
55458- u32 dd_fcvport_size;
55459- u32 dd_bsg_size;
55460+ const u32 dd_fcrport_size;
55461+ const u32 dd_fcvport_size;
55462+ const u32 dd_bsg_size;
55463
55464 /*
55465 * The driver sets these to tell the transport class it
55466@@ -678,39 +678,39 @@ struct fc_function_template {
55467 */
55468
55469 /* remote port fixed attributes */
55470- unsigned long show_rport_maxframe_size:1;
55471- unsigned long show_rport_supported_classes:1;
55472- unsigned long show_rport_dev_loss_tmo:1;
55473+ const unsigned long show_rport_maxframe_size:1;
55474+ const unsigned long show_rport_supported_classes:1;
55475+ const unsigned long show_rport_dev_loss_tmo:1;
55476
55477 /*
55478 * target dynamic attributes
55479 * These should all be "1" if the driver uses the remote port
55480 * add/delete functions (so attributes reflect rport values).
55481 */
55482- unsigned long show_starget_node_name:1;
55483- unsigned long show_starget_port_name:1;
55484- unsigned long show_starget_port_id:1;
55485+ const unsigned long show_starget_node_name:1;
55486+ const unsigned long show_starget_port_name:1;
55487+ const unsigned long show_starget_port_id:1;
55488
55489 /* host fixed attributes */
55490- unsigned long show_host_node_name:1;
55491- unsigned long show_host_port_name:1;
55492- unsigned long show_host_permanent_port_name:1;
55493- unsigned long show_host_supported_classes:1;
55494- unsigned long show_host_supported_fc4s:1;
55495- unsigned long show_host_supported_speeds:1;
55496- unsigned long show_host_maxframe_size:1;
55497- unsigned long show_host_serial_number:1;
55498+ const unsigned long show_host_node_name:1;
55499+ const unsigned long show_host_port_name:1;
55500+ const unsigned long show_host_permanent_port_name:1;
55501+ const unsigned long show_host_supported_classes:1;
55502+ const unsigned long show_host_supported_fc4s:1;
55503+ const unsigned long show_host_supported_speeds:1;
55504+ const unsigned long show_host_maxframe_size:1;
55505+ const unsigned long show_host_serial_number:1;
55506 /* host dynamic attributes */
55507- unsigned long show_host_port_id:1;
55508- unsigned long show_host_port_type:1;
55509- unsigned long show_host_port_state:1;
55510- unsigned long show_host_active_fc4s:1;
55511- unsigned long show_host_speed:1;
55512- unsigned long show_host_fabric_name:1;
55513- unsigned long show_host_symbolic_name:1;
55514- unsigned long show_host_system_hostname:1;
55515+ const unsigned long show_host_port_id:1;
55516+ const unsigned long show_host_port_type:1;
55517+ const unsigned long show_host_port_state:1;
55518+ const unsigned long show_host_active_fc4s:1;
55519+ const unsigned long show_host_speed:1;
55520+ const unsigned long show_host_fabric_name:1;
55521+ const unsigned long show_host_symbolic_name:1;
55522+ const unsigned long show_host_system_hostname:1;
55523
55524- unsigned long disable_target_scan:1;
55525+ const unsigned long disable_target_scan:1;
55526 };
55527
55528
55529diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55530--- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55531+++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55532@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55533 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55534 unsigned char val);
55535 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55536-};
55537+} __no_const;
55538
55539 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55540
55541diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55542--- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55543+++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55544@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55545 struct snd_hwdep_dsp_status *status);
55546 int (*dsp_load)(struct snd_hwdep *hw,
55547 struct snd_hwdep_dsp_image *image);
55548-};
55549+} __no_const;
55550
55551 struct snd_hwdep {
55552 struct snd_card *card;
55553diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55554--- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55555+++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55556@@ -44,7 +44,7 @@ struct snd_info_entry_text {
55557 struct snd_info_buffer *buffer);
55558 void (*write)(struct snd_info_entry *entry,
55559 struct snd_info_buffer *buffer);
55560-};
55561+} __no_const;
55562
55563 struct snd_info_entry_ops {
55564 int (*open)(struct snd_info_entry *entry,
55565diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55566--- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55567+++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55568@@ -81,6 +81,7 @@ struct snd_pcm_ops {
55569 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55570 int (*ack)(struct snd_pcm_substream *substream);
55571 };
55572+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55573
55574 /*
55575 *
55576diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55577--- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55578+++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55579@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55580 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55581 int (*csp_stop) (struct snd_sb_csp * p);
55582 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55583-};
55584+} __no_const;
55585
55586 /*
55587 * CSP private data
55588diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55589--- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55590+++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55591@@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55592 struct snd_soc_dai *);
55593
55594 /* platform stream ops */
55595- struct snd_pcm_ops *ops;
55596+ struct snd_pcm_ops * const ops;
55597 };
55598
55599 struct snd_soc_platform {
55600diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55601--- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55602+++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55603@@ -358,7 +358,7 @@ struct snd_ymfpci {
55604 spinlock_t reg_lock;
55605 spinlock_t voice_lock;
55606 wait_queue_head_t interrupt_sleep;
55607- atomic_t interrupt_sleep_count;
55608+ atomic_unchecked_t interrupt_sleep_count;
55609 struct snd_info_entry *proc_entry;
55610 const struct firmware *dsp_microcode;
55611 const struct firmware *controller_microcode;
55612diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55613--- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55614+++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55615@@ -364,7 +364,7 @@ struct t10_reservation_ops {
55616 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55617 int (*t10_pr_register)(struct se_cmd *);
55618 int (*t10_pr_clear)(struct se_cmd *);
55619-};
55620+} __no_const;
55621
55622 struct t10_reservation_template {
55623 /* Reservation effects all target ports */
55624@@ -432,8 +432,8 @@ struct se_transport_task {
55625 atomic_t t_task_cdbs_left;
55626 atomic_t t_task_cdbs_ex_left;
55627 atomic_t t_task_cdbs_timeout_left;
55628- atomic_t t_task_cdbs_sent;
55629- atomic_t t_transport_aborted;
55630+ atomic_unchecked_t t_task_cdbs_sent;
55631+ atomic_unchecked_t t_transport_aborted;
55632 atomic_t t_transport_active;
55633 atomic_t t_transport_complete;
55634 atomic_t t_transport_queue_active;
55635@@ -774,7 +774,7 @@ struct se_device {
55636 atomic_t active_cmds;
55637 atomic_t simple_cmds;
55638 atomic_t depth_left;
55639- atomic_t dev_ordered_id;
55640+ atomic_unchecked_t dev_ordered_id;
55641 atomic_t dev_tur_active;
55642 atomic_t execute_tasks;
55643 atomic_t dev_status_thr_count;
55644diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55645--- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55646+++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55647@@ -36,7 +36,7 @@ struct softirq_action;
55648 */
55649 TRACE_EVENT(irq_handler_entry,
55650
55651- TP_PROTO(int irq, struct irqaction *action),
55652+ TP_PROTO(int irq, const struct irqaction *action),
55653
55654 TP_ARGS(irq, action),
55655
55656@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55657 */
55658 TRACE_EVENT(irq_handler_exit,
55659
55660- TP_PROTO(int irq, struct irqaction *action, int ret),
55661+ TP_PROTO(int irq, const struct irqaction *action, int ret),
55662
55663 TP_ARGS(irq, action, ret),
55664
55665diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55666--- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55667+++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55668@@ -51,10 +51,10 @@ struct dlfb_data {
55669 int base8;
55670 u32 pseudo_palette[256];
55671 /* blit-only rendering path metrics, exposed through sysfs */
55672- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55673- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55674- atomic_t bytes_sent; /* to usb, after compression including overhead */
55675- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55676+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55677+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55678+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55679+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55680 };
55681
55682 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55683diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55684--- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55685+++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55686@@ -177,6 +177,7 @@ struct uvesafb_par {
55687 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55688 u8 pmi_setpal; /* PMI for palette changes */
55689 u16 *pmi_base; /* protected mode interface location */
55690+ u8 *pmi_code; /* protected mode code location */
55691 void *pmi_start;
55692 void *pmi_pal;
55693 u8 *vbe_state_orig; /*
55694diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55695--- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55696+++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55697@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55698
55699 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55700 {
55701- int err = sys_mount(name, "/root", fs, flags, data);
55702+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55703 if (err)
55704 return err;
55705
55706@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55707 va_start(args, fmt);
55708 vsprintf(buf, fmt, args);
55709 va_end(args);
55710- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55711+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55712 if (fd >= 0) {
55713 sys_ioctl(fd, FDEJECT, 0);
55714 sys_close(fd);
55715 }
55716 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55717- fd = sys_open("/dev/console", O_RDWR, 0);
55718+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55719 if (fd >= 0) {
55720 sys_ioctl(fd, TCGETS, (long)&termios);
55721 termios.c_lflag &= ~ICANON;
55722 sys_ioctl(fd, TCSETSF, (long)&termios);
55723- sys_read(fd, &c, 1);
55724+ sys_read(fd, (char __user *)&c, 1);
55725 termios.c_lflag |= ICANON;
55726 sys_ioctl(fd, TCSETSF, (long)&termios);
55727 sys_close(fd);
55728@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55729 mount_root();
55730 out:
55731 devtmpfs_mount("dev");
55732- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55733+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55734 sys_chroot((const char __user __force *)".");
55735 }
55736diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55737--- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55738+++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55739@@ -15,15 +15,15 @@ extern int root_mountflags;
55740
55741 static inline int create_dev(char *name, dev_t dev)
55742 {
55743- sys_unlink(name);
55744- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55745+ sys_unlink((__force char __user *)name);
55746+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55747 }
55748
55749 #if BITS_PER_LONG == 32
55750 static inline u32 bstat(char *name)
55751 {
55752 struct stat64 stat;
55753- if (sys_stat64(name, &stat) != 0)
55754+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55755 return 0;
55756 if (!S_ISBLK(stat.st_mode))
55757 return 0;
55758diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55759--- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55760+++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55761@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55762 create_dev("/dev/root.old", Root_RAM0);
55763 /* mount initrd on rootfs' /root */
55764 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55765- sys_mkdir("/old", 0700);
55766- root_fd = sys_open("/", 0, 0);
55767- old_fd = sys_open("/old", 0, 0);
55768+ sys_mkdir((__force const char __user *)"/old", 0700);
55769+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
55770+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55771 /* move initrd over / and chdir/chroot in initrd root */
55772- sys_chdir("/root");
55773- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55774- sys_chroot(".");
55775+ sys_chdir((__force const char __user *)"/root");
55776+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55777+ sys_chroot((__force const char __user *)".");
55778
55779 /*
55780 * In case that a resume from disk is carried out by linuxrc or one of
55781@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55782
55783 /* move initrd to rootfs' /old */
55784 sys_fchdir(old_fd);
55785- sys_mount("/", ".", NULL, MS_MOVE, NULL);
55786+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55787 /* switch root and cwd back to / of rootfs */
55788 sys_fchdir(root_fd);
55789- sys_chroot(".");
55790+ sys_chroot((__force const char __user *)".");
55791 sys_close(old_fd);
55792 sys_close(root_fd);
55793
55794 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55795- sys_chdir("/old");
55796+ sys_chdir((__force const char __user *)"/old");
55797 return;
55798 }
55799
55800@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55801 mount_root();
55802
55803 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55804- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55805+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55806 if (!error)
55807 printk("okay\n");
55808 else {
55809- int fd = sys_open("/dev/root.old", O_RDWR, 0);
55810+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55811 if (error == -ENOENT)
55812 printk("/initrd does not exist. Ignored.\n");
55813 else
55814 printk("failed\n");
55815 printk(KERN_NOTICE "Unmounting old root\n");
55816- sys_umount("/old", MNT_DETACH);
55817+ sys_umount((__force char __user *)"/old", MNT_DETACH);
55818 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55819 if (fd < 0) {
55820 error = fd;
55821@@ -116,11 +116,11 @@ int __init initrd_load(void)
55822 * mounted in the normal path.
55823 */
55824 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55825- sys_unlink("/initrd.image");
55826+ sys_unlink((__force const char __user *)"/initrd.image");
55827 handle_initrd();
55828 return 1;
55829 }
55830 }
55831- sys_unlink("/initrd.image");
55832+ sys_unlink((__force const char __user *)"/initrd.image");
55833 return 0;
55834 }
55835diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55836--- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55837+++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55838@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55839 partitioned ? "_d" : "", minor,
55840 md_setup_args[ent].device_names);
55841
55842- fd = sys_open(name, 0, 0);
55843+ fd = sys_open((__force char __user *)name, 0, 0);
55844 if (fd < 0) {
55845 printk(KERN_ERR "md: open failed - cannot start "
55846 "array %s\n", name);
55847@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55848 * array without it
55849 */
55850 sys_close(fd);
55851- fd = sys_open(name, 0, 0);
55852+ fd = sys_open((__force char __user *)name, 0, 0);
55853 sys_ioctl(fd, BLKRRPART, 0);
55854 }
55855 sys_close(fd);
55856diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55857--- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55858+++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55859@@ -74,7 +74,7 @@ static void __init free_hash(void)
55860 }
55861 }
55862
55863-static long __init do_utime(char __user *filename, time_t mtime)
55864+static long __init do_utime(__force char __user *filename, time_t mtime)
55865 {
55866 struct timespec t[2];
55867
55868@@ -109,7 +109,7 @@ static void __init dir_utime(void)
55869 struct dir_entry *de, *tmp;
55870 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55871 list_del(&de->list);
55872- do_utime(de->name, de->mtime);
55873+ do_utime((__force char __user *)de->name, de->mtime);
55874 kfree(de->name);
55875 kfree(de);
55876 }
55877@@ -271,7 +271,7 @@ static int __init maybe_link(void)
55878 if (nlink >= 2) {
55879 char *old = find_link(major, minor, ino, mode, collected);
55880 if (old)
55881- return (sys_link(old, collected) < 0) ? -1 : 1;
55882+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55883 }
55884 return 0;
55885 }
55886@@ -280,11 +280,11 @@ static void __init clean_path(char *path
55887 {
55888 struct stat st;
55889
55890- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55891+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55892 if (S_ISDIR(st.st_mode))
55893- sys_rmdir(path);
55894+ sys_rmdir((__force char __user *)path);
55895 else
55896- sys_unlink(path);
55897+ sys_unlink((__force char __user *)path);
55898 }
55899 }
55900
55901@@ -305,7 +305,7 @@ static int __init do_name(void)
55902 int openflags = O_WRONLY|O_CREAT;
55903 if (ml != 1)
55904 openflags |= O_TRUNC;
55905- wfd = sys_open(collected, openflags, mode);
55906+ wfd = sys_open((__force char __user *)collected, openflags, mode);
55907
55908 if (wfd >= 0) {
55909 sys_fchown(wfd, uid, gid);
55910@@ -317,17 +317,17 @@ static int __init do_name(void)
55911 }
55912 }
55913 } else if (S_ISDIR(mode)) {
55914- sys_mkdir(collected, mode);
55915- sys_chown(collected, uid, gid);
55916- sys_chmod(collected, mode);
55917+ sys_mkdir((__force char __user *)collected, mode);
55918+ sys_chown((__force char __user *)collected, uid, gid);
55919+ sys_chmod((__force char __user *)collected, mode);
55920 dir_add(collected, mtime);
55921 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55922 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55923 if (maybe_link() == 0) {
55924- sys_mknod(collected, mode, rdev);
55925- sys_chown(collected, uid, gid);
55926- sys_chmod(collected, mode);
55927- do_utime(collected, mtime);
55928+ sys_mknod((__force char __user *)collected, mode, rdev);
55929+ sys_chown((__force char __user *)collected, uid, gid);
55930+ sys_chmod((__force char __user *)collected, mode);
55931+ do_utime((__force char __user *)collected, mtime);
55932 }
55933 }
55934 return 0;
55935@@ -336,15 +336,15 @@ static int __init do_name(void)
55936 static int __init do_copy(void)
55937 {
55938 if (count >= body_len) {
55939- sys_write(wfd, victim, body_len);
55940+ sys_write(wfd, (__force char __user *)victim, body_len);
55941 sys_close(wfd);
55942- do_utime(vcollected, mtime);
55943+ do_utime((__force char __user *)vcollected, mtime);
55944 kfree(vcollected);
55945 eat(body_len);
55946 state = SkipIt;
55947 return 0;
55948 } else {
55949- sys_write(wfd, victim, count);
55950+ sys_write(wfd, (__force char __user *)victim, count);
55951 body_len -= count;
55952 eat(count);
55953 return 1;
55954@@ -355,9 +355,9 @@ static int __init do_symlink(void)
55955 {
55956 collected[N_ALIGN(name_len) + body_len] = '\0';
55957 clean_path(collected, 0);
55958- sys_symlink(collected + N_ALIGN(name_len), collected);
55959- sys_lchown(collected, uid, gid);
55960- do_utime(collected, mtime);
55961+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55962+ sys_lchown((__force char __user *)collected, uid, gid);
55963+ do_utime((__force char __user *)collected, mtime);
55964 state = SkipIt;
55965 next_state = Reset;
55966 return 0;
55967diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55968--- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55969+++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55970@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55971
55972 config COMPAT_BRK
55973 bool "Disable heap randomization"
55974- default y
55975+ default n
55976 help
55977 Randomizing heap placement makes heap exploits harder, but it
55978 also breaks ancient binaries (including anything libc5 based).
55979diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
55980--- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
55981+++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
55982@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55983 extern void tc_init(void);
55984 #endif
55985
55986+extern void grsecurity_init(void);
55987+
55988 /*
55989 * Debug helper: via this flag we know that we are in 'early bootup code'
55990 * where only the boot processor is running with IRQ disabled. This means
55991@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55992
55993 __setup("reset_devices", set_reset_devices);
55994
55995+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55996+extern char pax_enter_kernel_user[];
55997+extern char pax_exit_kernel_user[];
55998+extern pgdval_t clone_pgd_mask;
55999+#endif
56000+
56001+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
56002+static int __init setup_pax_nouderef(char *str)
56003+{
56004+#ifdef CONFIG_X86_32
56005+ unsigned int cpu;
56006+ struct desc_struct *gdt;
56007+
56008+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
56009+ gdt = get_cpu_gdt_table(cpu);
56010+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
56011+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
56012+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
56013+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
56014+ }
56015+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
56016+#else
56017+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
56018+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
56019+ clone_pgd_mask = ~(pgdval_t)0UL;
56020+#endif
56021+
56022+ return 0;
56023+}
56024+early_param("pax_nouderef", setup_pax_nouderef);
56025+#endif
56026+
56027+#ifdef CONFIG_PAX_SOFTMODE
56028+int pax_softmode;
56029+
56030+static int __init setup_pax_softmode(char *str)
56031+{
56032+ get_option(&str, &pax_softmode);
56033+ return 1;
56034+}
56035+__setup("pax_softmode=", setup_pax_softmode);
56036+#endif
56037+
56038 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
56039 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
56040 static const char *panic_later, *panic_param;
56041@@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
56042 {
56043 int count = preempt_count();
56044 int ret;
56045+ const char *msg1 = "", *msg2 = "";
56046
56047 if (initcall_debug)
56048 ret = do_one_initcall_debug(fn);
56049@@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
56050 sprintf(msgbuf, "error code %d ", ret);
56051
56052 if (preempt_count() != count) {
56053- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
56054+ msg1 = " preemption imbalance";
56055 preempt_count() = count;
56056 }
56057 if (irqs_disabled()) {
56058- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
56059+ msg2 = " disabled interrupts";
56060 local_irq_enable();
56061 }
56062- if (msgbuf[0]) {
56063- printk("initcall %pF returned with %s\n", fn, msgbuf);
56064+ if (msgbuf[0] || *msg1 || *msg2) {
56065+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
56066 }
56067
56068 return ret;
56069@@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
56070 do_basic_setup();
56071
56072 /* Open the /dev/console on the rootfs, this should never fail */
56073- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
56074+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
56075 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
56076
56077 (void) sys_dup(0);
56078@@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
56079 if (!ramdisk_execute_command)
56080 ramdisk_execute_command = "/init";
56081
56082- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
56083+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
56084 ramdisk_execute_command = NULL;
56085 prepare_namespace();
56086 }
56087
56088+ grsecurity_init();
56089+
56090 /*
56091 * Ok, we have completed the initial bootup, and
56092 * we're essentially up and running. Get rid of the
56093diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
56094--- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
56095+++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
56096@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
56097 mq_bytes = (mq_msg_tblsz +
56098 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
56099
56100+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
56101 spin_lock(&mq_lock);
56102 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
56103 u->mq_bytes + mq_bytes >
56104diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
56105--- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
56106+++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
56107@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
56108 return security_msg_queue_associate(msq, msgflg);
56109 }
56110
56111+static struct ipc_ops msg_ops = {
56112+ .getnew = newque,
56113+ .associate = msg_security,
56114+ .more_checks = NULL
56115+};
56116+
56117 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
56118 {
56119 struct ipc_namespace *ns;
56120- struct ipc_ops msg_ops;
56121 struct ipc_params msg_params;
56122
56123 ns = current->nsproxy->ipc_ns;
56124
56125- msg_ops.getnew = newque;
56126- msg_ops.associate = msg_security;
56127- msg_ops.more_checks = NULL;
56128-
56129 msg_params.key = key;
56130 msg_params.flg = msgflg;
56131
56132diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
56133--- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
56134+++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
56135@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
56136 return 0;
56137 }
56138
56139+static struct ipc_ops sem_ops = {
56140+ .getnew = newary,
56141+ .associate = sem_security,
56142+ .more_checks = sem_more_checks
56143+};
56144+
56145 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
56146 {
56147 struct ipc_namespace *ns;
56148- struct ipc_ops sem_ops;
56149 struct ipc_params sem_params;
56150
56151 ns = current->nsproxy->ipc_ns;
56152@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56153 if (nsems < 0 || nsems > ns->sc_semmsl)
56154 return -EINVAL;
56155
56156- sem_ops.getnew = newary;
56157- sem_ops.associate = sem_security;
56158- sem_ops.more_checks = sem_more_checks;
56159-
56160 sem_params.key = key;
56161 sem_params.flg = semflg;
56162 sem_params.u.nsems = nsems;
56163@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56164 int nsems;
56165 struct list_head tasks;
56166
56167+ pax_track_stack();
56168+
56169 sma = sem_lock_check(ns, semid);
56170 if (IS_ERR(sma))
56171 return PTR_ERR(sma);
56172@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56173 struct ipc_namespace *ns;
56174 struct list_head tasks;
56175
56176+ pax_track_stack();
56177+
56178 ns = current->nsproxy->ipc_ns;
56179
56180 if (nsops < 1 || semid < 0)
56181diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56182--- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56183+++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56184@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56185 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56186 #endif
56187
56188+#ifdef CONFIG_GRKERNSEC
56189+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56190+ const time_t shm_createtime, const uid_t cuid,
56191+ const int shmid);
56192+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56193+ const time_t shm_createtime);
56194+#endif
56195+
56196 void shm_init_ns(struct ipc_namespace *ns)
56197 {
56198 ns->shm_ctlmax = SHMMAX;
56199@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56200 shp->shm_lprid = 0;
56201 shp->shm_atim = shp->shm_dtim = 0;
56202 shp->shm_ctim = get_seconds();
56203+#ifdef CONFIG_GRKERNSEC
56204+ {
56205+ struct timespec timeval;
56206+ do_posix_clock_monotonic_gettime(&timeval);
56207+
56208+ shp->shm_createtime = timeval.tv_sec;
56209+ }
56210+#endif
56211 shp->shm_segsz = size;
56212 shp->shm_nattch = 0;
56213 shp->shm_file = file;
56214@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56215 return 0;
56216 }
56217
56218+static struct ipc_ops shm_ops = {
56219+ .getnew = newseg,
56220+ .associate = shm_security,
56221+ .more_checks = shm_more_checks
56222+};
56223+
56224 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56225 {
56226 struct ipc_namespace *ns;
56227- struct ipc_ops shm_ops;
56228 struct ipc_params shm_params;
56229
56230 ns = current->nsproxy->ipc_ns;
56231
56232- shm_ops.getnew = newseg;
56233- shm_ops.associate = shm_security;
56234- shm_ops.more_checks = shm_more_checks;
56235-
56236 shm_params.key = key;
56237 shm_params.flg = shmflg;
56238 shm_params.u.size = size;
56239@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56240 case SHM_LOCK:
56241 case SHM_UNLOCK:
56242 {
56243- struct file *uninitialized_var(shm_file);
56244-
56245 lru_add_drain_all(); /* drain pagevecs to lru lists */
56246
56247 shp = shm_lock_check(ns, shmid);
56248@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56249 if (err)
56250 goto out_unlock;
56251
56252+#ifdef CONFIG_GRKERNSEC
56253+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56254+ shp->shm_perm.cuid, shmid) ||
56255+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56256+ err = -EACCES;
56257+ goto out_unlock;
56258+ }
56259+#endif
56260+
56261 path = shp->shm_file->f_path;
56262 path_get(&path);
56263 shp->shm_nattch++;
56264+#ifdef CONFIG_GRKERNSEC
56265+ shp->shm_lapid = current->pid;
56266+#endif
56267 size = i_size_read(path.dentry->d_inode);
56268 shm_unlock(shp);
56269
56270diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56271--- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56272+++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56273@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56274 */
56275 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56276 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56277- file->f_op->write(file, (char *)&ac,
56278+ file->f_op->write(file, (__force char __user *)&ac,
56279 sizeof(acct_t), &file->f_pos);
56280 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56281 set_fs(fs);
56282diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56283--- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56284+++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56285@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56286 3) suppressed due to audit_rate_limit
56287 4) suppressed due to audit_backlog_limit
56288 */
56289-static atomic_t audit_lost = ATOMIC_INIT(0);
56290+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56291
56292 /* The netlink socket. */
56293 static struct sock *audit_sock;
56294@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56295 unsigned long now;
56296 int print;
56297
56298- atomic_inc(&audit_lost);
56299+ atomic_inc_unchecked(&audit_lost);
56300
56301 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56302
56303@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56304 printk(KERN_WARNING
56305 "audit: audit_lost=%d audit_rate_limit=%d "
56306 "audit_backlog_limit=%d\n",
56307- atomic_read(&audit_lost),
56308+ atomic_read_unchecked(&audit_lost),
56309 audit_rate_limit,
56310 audit_backlog_limit);
56311 audit_panic(message);
56312@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56313 status_set.pid = audit_pid;
56314 status_set.rate_limit = audit_rate_limit;
56315 status_set.backlog_limit = audit_backlog_limit;
56316- status_set.lost = atomic_read(&audit_lost);
56317+ status_set.lost = atomic_read_unchecked(&audit_lost);
56318 status_set.backlog = skb_queue_len(&audit_skb_queue);
56319 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56320 &status_set, sizeof(status_set));
56321diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56322--- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56323+++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56324@@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56325 }
56326
56327 /* global counter which is incremented every time something logs in */
56328-static atomic_t session_id = ATOMIC_INIT(0);
56329+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56330
56331 /**
56332 * audit_set_loginuid - set a task's audit_context loginuid
56333@@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56334 */
56335 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56336 {
56337- unsigned int sessionid = atomic_inc_return(&session_id);
56338+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56339 struct audit_context *context = task->audit_context;
56340
56341 if (context && context->in_syscall) {
56342diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56343--- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56344+++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56345@@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56346 * before modification is attempted and the application
56347 * fails.
56348 */
56349+ if (tocopy > ARRAY_SIZE(kdata))
56350+ return -EFAULT;
56351+
56352 if (copy_to_user(dataptr, kdata, tocopy
56353 * sizeof(struct __user_cap_data_struct))) {
56354 return -EFAULT;
56355@@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56356 BUG();
56357 }
56358
56359- if (security_capable(ns, current_cred(), cap) == 0) {
56360+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56361 current->flags |= PF_SUPERPRIV;
56362 return true;
56363 }
56364@@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56365 }
56366 EXPORT_SYMBOL(ns_capable);
56367
56368+bool ns_capable_nolog(struct user_namespace *ns, int cap)
56369+{
56370+ if (unlikely(!cap_valid(cap))) {
56371+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56372+ BUG();
56373+ }
56374+
56375+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56376+ current->flags |= PF_SUPERPRIV;
56377+ return true;
56378+ }
56379+ return false;
56380+}
56381+EXPORT_SYMBOL(ns_capable_nolog);
56382+
56383+bool capable_nolog(int cap)
56384+{
56385+ return ns_capable_nolog(&init_user_ns, cap);
56386+}
56387+EXPORT_SYMBOL(capable_nolog);
56388+
56389 /**
56390 * task_ns_capable - Determine whether current task has a superior
56391 * capability targeted at a specific task's user namespace.
56392@@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56393 }
56394 EXPORT_SYMBOL(task_ns_capable);
56395
56396+bool task_ns_capable_nolog(struct task_struct *t, int cap)
56397+{
56398+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56399+}
56400+EXPORT_SYMBOL(task_ns_capable_nolog);
56401+
56402 /**
56403 * nsown_capable - Check superior capability to one's own user_ns
56404 * @cap: The capability in question
56405diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56406--- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56407+++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56408@@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56409 struct hlist_head *hhead;
56410 struct cg_cgroup_link *link;
56411
56412+ pax_track_stack();
56413+
56414 /* First see if we already have a cgroup group that matches
56415 * the desired set */
56416 read_lock(&css_set_lock);
56417diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56418--- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56419+++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56420@@ -13,6 +13,7 @@
56421
56422 #include <linux/linkage.h>
56423 #include <linux/compat.h>
56424+#include <linux/module.h>
56425 #include <linux/errno.h>
56426 #include <linux/time.h>
56427 #include <linux/signal.h>
56428diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56429--- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56430+++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56431@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56432 struct proc_dir_entry *entry;
56433
56434 /* create the current config file */
56435+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56436+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56437+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56438+ &ikconfig_file_ops);
56439+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56440+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56441+ &ikconfig_file_ops);
56442+#endif
56443+#else
56444 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56445 &ikconfig_file_ops);
56446+#endif
56447+
56448 if (!entry)
56449 return -ENOMEM;
56450
56451diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56452--- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56453+++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56454@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56455 */
56456 void __put_cred(struct cred *cred)
56457 {
56458+ pax_track_stack();
56459+
56460 kdebug("__put_cred(%p{%d,%d})", cred,
56461 atomic_read(&cred->usage),
56462 read_cred_subscribers(cred));
56463@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56464 {
56465 struct cred *cred;
56466
56467+ pax_track_stack();
56468+
56469 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56470 atomic_read(&tsk->cred->usage),
56471 read_cred_subscribers(tsk->cred));
56472@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56473 {
56474 const struct cred *cred;
56475
56476+ pax_track_stack();
56477+
56478 rcu_read_lock();
56479
56480 do {
56481@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56482 {
56483 struct cred *new;
56484
56485+ pax_track_stack();
56486+
56487 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56488 if (!new)
56489 return NULL;
56490@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56491 const struct cred *old;
56492 struct cred *new;
56493
56494+ pax_track_stack();
56495+
56496 validate_process_creds();
56497
56498 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56499@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56500 struct thread_group_cred *tgcred = NULL;
56501 struct cred *new;
56502
56503+ pax_track_stack();
56504+
56505 #ifdef CONFIG_KEYS
56506 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56507 if (!tgcred)
56508@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56509 struct cred *new;
56510 int ret;
56511
56512+ pax_track_stack();
56513+
56514 if (
56515 #ifdef CONFIG_KEYS
56516 !p->cred->thread_keyring &&
56517@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56518 struct task_struct *task = current;
56519 const struct cred *old = task->real_cred;
56520
56521+ pax_track_stack();
56522+
56523 kdebug("commit_creds(%p{%d,%d})", new,
56524 atomic_read(&new->usage),
56525 read_cred_subscribers(new));
56526@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56527
56528 get_cred(new); /* we will require a ref for the subj creds too */
56529
56530+ gr_set_role_label(task, new->uid, new->gid);
56531+
56532 /* dumpability changes */
56533 if (old->euid != new->euid ||
56534 old->egid != new->egid ||
56535@@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56536 */
56537 void abort_creds(struct cred *new)
56538 {
56539+ pax_track_stack();
56540+
56541 kdebug("abort_creds(%p{%d,%d})", new,
56542 atomic_read(&new->usage),
56543 read_cred_subscribers(new));
56544@@ -574,6 +594,8 @@ const struct cred *override_creds(const
56545 {
56546 const struct cred *old = current->cred;
56547
56548+ pax_track_stack();
56549+
56550 kdebug("override_creds(%p{%d,%d})", new,
56551 atomic_read(&new->usage),
56552 read_cred_subscribers(new));
56553@@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56554 {
56555 const struct cred *override = current->cred;
56556
56557+ pax_track_stack();
56558+
56559 kdebug("revert_creds(%p{%d,%d})", old,
56560 atomic_read(&old->usage),
56561 read_cred_subscribers(old));
56562@@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56563 const struct cred *old;
56564 struct cred *new;
56565
56566+ pax_track_stack();
56567+
56568 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56569 if (!new)
56570 return NULL;
56571@@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56572 */
56573 int set_security_override(struct cred *new, u32 secid)
56574 {
56575+ pax_track_stack();
56576+
56577 return security_kernel_act_as(new, secid);
56578 }
56579 EXPORT_SYMBOL(set_security_override);
56580@@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56581 u32 secid;
56582 int ret;
56583
56584+ pax_track_stack();
56585+
56586 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56587 if (ret < 0)
56588 return ret;
56589diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56590--- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56591+++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56592@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56593 */
56594 static atomic_t masters_in_kgdb;
56595 static atomic_t slaves_in_kgdb;
56596-static atomic_t kgdb_break_tasklet_var;
56597+static atomic_unchecked_t kgdb_break_tasklet_var;
56598 atomic_t kgdb_setting_breakpoint;
56599
56600 struct task_struct *kgdb_usethread;
56601@@ -129,7 +129,7 @@ int kgdb_single_step;
56602 static pid_t kgdb_sstep_pid;
56603
56604 /* to keep track of the CPU which is doing the single stepping*/
56605-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56606+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56607
56608 /*
56609 * If you are debugging a problem where roundup (the collection of
56610@@ -542,7 +542,7 @@ return_normal:
56611 * kernel will only try for the value of sstep_tries before
56612 * giving up and continuing on.
56613 */
56614- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56615+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56616 (kgdb_info[cpu].task &&
56617 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56618 atomic_set(&kgdb_active, -1);
56619@@ -636,8 +636,8 @@ cpu_master_loop:
56620 }
56621
56622 kgdb_restore:
56623- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56624- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56625+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56626+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56627 if (kgdb_info[sstep_cpu].task)
56628 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56629 else
56630@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56631 static void kgdb_tasklet_bpt(unsigned long ing)
56632 {
56633 kgdb_breakpoint();
56634- atomic_set(&kgdb_break_tasklet_var, 0);
56635+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56636 }
56637
56638 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56639
56640 void kgdb_schedule_breakpoint(void)
56641 {
56642- if (atomic_read(&kgdb_break_tasklet_var) ||
56643+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56644 atomic_read(&kgdb_active) != -1 ||
56645 atomic_read(&kgdb_setting_breakpoint))
56646 return;
56647- atomic_inc(&kgdb_break_tasklet_var);
56648+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
56649 tasklet_schedule(&kgdb_tasklet_breakpoint);
56650 }
56651 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56652diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56653--- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56654+++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56655@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56656 list_for_each_entry(mod, kdb_modules, list) {
56657
56658 kdb_printf("%-20s%8u 0x%p ", mod->name,
56659- mod->core_size, (void *)mod);
56660+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
56661 #ifdef CONFIG_MODULE_UNLOAD
56662 kdb_printf("%4d ", module_refcount(mod));
56663 #endif
56664@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56665 kdb_printf(" (Loading)");
56666 else
56667 kdb_printf(" (Live)");
56668- kdb_printf(" 0x%p", mod->module_core);
56669+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56670
56671 #ifdef CONFIG_MODULE_UNLOAD
56672 {
56673diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56674--- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56675+++ linux-2.6.39.4/kernel/exit.c 2011-08-05 19:44:37.000000000 -0400
56676@@ -57,6 +57,10 @@
56677 #include <asm/pgtable.h>
56678 #include <asm/mmu_context.h>
56679
56680+#ifdef CONFIG_GRKERNSEC
56681+extern rwlock_t grsec_exec_file_lock;
56682+#endif
56683+
56684 static void exit_mm(struct task_struct * tsk);
56685
56686 static void __unhash_process(struct task_struct *p, bool group_dead)
56687@@ -169,6 +173,8 @@ void release_task(struct task_struct * p
56688 struct task_struct *leader;
56689 int zap_leader;
56690 repeat:
56691+ gr_del_task_from_ip_table(p);
56692+
56693 tracehook_prepare_release_task(p);
56694 /* don't need to get the RCU readlock here - the process is dead and
56695 * can't be modifying its own credentials. But shut RCU-lockdep up */
56696@@ -338,11 +344,22 @@ static void reparent_to_kthreadd(void)
56697 {
56698 write_lock_irq(&tasklist_lock);
56699
56700+#ifdef CONFIG_GRKERNSEC
56701+ write_lock(&grsec_exec_file_lock);
56702+ if (current->exec_file) {
56703+ fput(current->exec_file);
56704+ current->exec_file = NULL;
56705+ }
56706+ write_unlock(&grsec_exec_file_lock);
56707+#endif
56708+
56709 ptrace_unlink(current);
56710 /* Reparent to init */
56711 current->real_parent = current->parent = kthreadd_task;
56712 list_move_tail(&current->sibling, &current->real_parent->children);
56713
56714+ gr_set_kernel_label(current);
56715+
56716 /* Set the exit signal to SIGCHLD so we signal init on exit */
56717 current->exit_signal = SIGCHLD;
56718
56719@@ -394,7 +411,7 @@ int allow_signal(int sig)
56720 * know it'll be handled, so that they don't get converted to
56721 * SIGKILL or just silently dropped.
56722 */
56723- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56724+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56725 recalc_sigpending();
56726 spin_unlock_irq(&current->sighand->siglock);
56727 return 0;
56728@@ -430,6 +447,17 @@ void daemonize(const char *name, ...)
56729 vsnprintf(current->comm, sizeof(current->comm), name, args);
56730 va_end(args);
56731
56732+#ifdef CONFIG_GRKERNSEC
56733+ write_lock(&grsec_exec_file_lock);
56734+ if (current->exec_file) {
56735+ fput(current->exec_file);
56736+ current->exec_file = NULL;
56737+ }
56738+ write_unlock(&grsec_exec_file_lock);
56739+#endif
56740+
56741+ gr_set_kernel_label(current);
56742+
56743 /*
56744 * If we were started as result of loading a module, close all of the
56745 * user space pages. We don't need them, and if we didn't close them
56746@@ -905,15 +933,8 @@ NORET_TYPE void do_exit(long code)
56747 struct task_struct *tsk = current;
56748 int group_dead;
56749
56750- profile_task_exit(tsk);
56751-
56752- WARN_ON(atomic_read(&tsk->fs_excl));
56753- WARN_ON(blk_needs_flush_plug(tsk));
56754-
56755 if (unlikely(in_interrupt()))
56756 panic("Aiee, killing interrupt handler!");
56757- if (unlikely(!tsk->pid))
56758- panic("Attempted to kill the idle task!");
56759
56760 /*
56761 * If do_exit is called because this processes oopsed, it's possible
56762@@ -924,6 +945,14 @@ NORET_TYPE void do_exit(long code)
56763 */
56764 set_fs(USER_DS);
56765
56766+ profile_task_exit(tsk);
56767+
56768+ WARN_ON(atomic_read(&tsk->fs_excl));
56769+ WARN_ON(blk_needs_flush_plug(tsk));
56770+
56771+ if (unlikely(!tsk->pid))
56772+ panic("Attempted to kill the idle task!");
56773+
56774 tracehook_report_exit(&code);
56775
56776 validate_creds_for_do_exit(tsk);
56777@@ -984,6 +1013,9 @@ NORET_TYPE void do_exit(long code)
56778 tsk->exit_code = code;
56779 taskstats_exit(tsk, group_dead);
56780
56781+ gr_acl_handle_psacct(tsk, code);
56782+ gr_acl_handle_exit();
56783+
56784 exit_mm(tsk);
56785
56786 if (group_dead)
56787diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56788--- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56789+++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56790@@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56791 *stackend = STACK_END_MAGIC; /* for overflow detection */
56792
56793 #ifdef CONFIG_CC_STACKPROTECTOR
56794- tsk->stack_canary = get_random_int();
56795+ tsk->stack_canary = pax_get_random_long();
56796 #endif
56797
56798 /* One for us, one for whoever does the "release_task()" (usually parent) */
56799@@ -309,13 +309,78 @@ out:
56800 }
56801
56802 #ifdef CONFIG_MMU
56803+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56804+{
56805+ struct vm_area_struct *tmp;
56806+ unsigned long charge;
56807+ struct mempolicy *pol;
56808+ struct file *file;
56809+
56810+ charge = 0;
56811+ if (mpnt->vm_flags & VM_ACCOUNT) {
56812+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56813+ if (security_vm_enough_memory(len))
56814+ goto fail_nomem;
56815+ charge = len;
56816+ }
56817+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56818+ if (!tmp)
56819+ goto fail_nomem;
56820+ *tmp = *mpnt;
56821+ tmp->vm_mm = mm;
56822+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
56823+ pol = mpol_dup(vma_policy(mpnt));
56824+ if (IS_ERR(pol))
56825+ goto fail_nomem_policy;
56826+ vma_set_policy(tmp, pol);
56827+ if (anon_vma_fork(tmp, mpnt))
56828+ goto fail_nomem_anon_vma_fork;
56829+ tmp->vm_flags &= ~VM_LOCKED;
56830+ tmp->vm_next = tmp->vm_prev = NULL;
56831+ tmp->vm_mirror = NULL;
56832+ file = tmp->vm_file;
56833+ if (file) {
56834+ struct inode *inode = file->f_path.dentry->d_inode;
56835+ struct address_space *mapping = file->f_mapping;
56836+
56837+ get_file(file);
56838+ if (tmp->vm_flags & VM_DENYWRITE)
56839+ atomic_dec(&inode->i_writecount);
56840+ spin_lock(&mapping->i_mmap_lock);
56841+ if (tmp->vm_flags & VM_SHARED)
56842+ mapping->i_mmap_writable++;
56843+ tmp->vm_truncate_count = mpnt->vm_truncate_count;
56844+ flush_dcache_mmap_lock(mapping);
56845+ /* insert tmp into the share list, just after mpnt */
56846+ vma_prio_tree_add(tmp, mpnt);
56847+ flush_dcache_mmap_unlock(mapping);
56848+ spin_unlock(&mapping->i_mmap_lock);
56849+ }
56850+
56851+ /*
56852+ * Clear hugetlb-related page reserves for children. This only
56853+ * affects MAP_PRIVATE mappings. Faults generated by the child
56854+ * are not guaranteed to succeed, even if read-only
56855+ */
56856+ if (is_vm_hugetlb_page(tmp))
56857+ reset_vma_resv_huge_pages(tmp);
56858+
56859+ return tmp;
56860+
56861+fail_nomem_anon_vma_fork:
56862+ mpol_put(pol);
56863+fail_nomem_policy:
56864+ kmem_cache_free(vm_area_cachep, tmp);
56865+fail_nomem:
56866+ vm_unacct_memory(charge);
56867+ return NULL;
56868+}
56869+
56870 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56871 {
56872 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56873 struct rb_node **rb_link, *rb_parent;
56874 int retval;
56875- unsigned long charge;
56876- struct mempolicy *pol;
56877
56878 down_write(&oldmm->mmap_sem);
56879 flush_cache_dup_mm(oldmm);
56880@@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56881 mm->locked_vm = 0;
56882 mm->mmap = NULL;
56883 mm->mmap_cache = NULL;
56884- mm->free_area_cache = oldmm->mmap_base;
56885- mm->cached_hole_size = ~0UL;
56886+ mm->free_area_cache = oldmm->free_area_cache;
56887+ mm->cached_hole_size = oldmm->cached_hole_size;
56888 mm->map_count = 0;
56889 cpumask_clear(mm_cpumask(mm));
56890 mm->mm_rb = RB_ROOT;
56891@@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56892
56893 prev = NULL;
56894 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56895- struct file *file;
56896-
56897 if (mpnt->vm_flags & VM_DONTCOPY) {
56898 long pages = vma_pages(mpnt);
56899 mm->total_vm -= pages;
56900@@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56901 -pages);
56902 continue;
56903 }
56904- charge = 0;
56905- if (mpnt->vm_flags & VM_ACCOUNT) {
56906- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56907- if (security_vm_enough_memory(len))
56908- goto fail_nomem;
56909- charge = len;
56910- }
56911- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56912- if (!tmp)
56913- goto fail_nomem;
56914- *tmp = *mpnt;
56915- INIT_LIST_HEAD(&tmp->anon_vma_chain);
56916- pol = mpol_dup(vma_policy(mpnt));
56917- retval = PTR_ERR(pol);
56918- if (IS_ERR(pol))
56919- goto fail_nomem_policy;
56920- vma_set_policy(tmp, pol);
56921- tmp->vm_mm = mm;
56922- if (anon_vma_fork(tmp, mpnt))
56923- goto fail_nomem_anon_vma_fork;
56924- tmp->vm_flags &= ~VM_LOCKED;
56925- tmp->vm_next = tmp->vm_prev = NULL;
56926- file = tmp->vm_file;
56927- if (file) {
56928- struct inode *inode = file->f_path.dentry->d_inode;
56929- struct address_space *mapping = file->f_mapping;
56930-
56931- get_file(file);
56932- if (tmp->vm_flags & VM_DENYWRITE)
56933- atomic_dec(&inode->i_writecount);
56934- spin_lock(&mapping->i_mmap_lock);
56935- if (tmp->vm_flags & VM_SHARED)
56936- mapping->i_mmap_writable++;
56937- tmp->vm_truncate_count = mpnt->vm_truncate_count;
56938- flush_dcache_mmap_lock(mapping);
56939- /* insert tmp into the share list, just after mpnt */
56940- vma_prio_tree_add(tmp, mpnt);
56941- flush_dcache_mmap_unlock(mapping);
56942- spin_unlock(&mapping->i_mmap_lock);
56943+ tmp = dup_vma(mm, mpnt);
56944+ if (!tmp) {
56945+ retval = -ENOMEM;
56946+ goto out;
56947 }
56948
56949 /*
56950- * Clear hugetlb-related page reserves for children. This only
56951- * affects MAP_PRIVATE mappings. Faults generated by the child
56952- * are not guaranteed to succeed, even if read-only
56953- */
56954- if (is_vm_hugetlb_page(tmp))
56955- reset_vma_resv_huge_pages(tmp);
56956-
56957- /*
56958 * Link in the new vma and copy the page table entries.
56959 */
56960 *pprev = tmp;
56961@@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56962 if (retval)
56963 goto out;
56964 }
56965+
56966+#ifdef CONFIG_PAX_SEGMEXEC
56967+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56968+ struct vm_area_struct *mpnt_m;
56969+
56970+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56971+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56972+
56973+ if (!mpnt->vm_mirror)
56974+ continue;
56975+
56976+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56977+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56978+ mpnt->vm_mirror = mpnt_m;
56979+ } else {
56980+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56981+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56982+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56983+ mpnt->vm_mirror->vm_mirror = mpnt;
56984+ }
56985+ }
56986+ BUG_ON(mpnt_m);
56987+ }
56988+#endif
56989+
56990 /* a new mm has just been created */
56991 arch_dup_mmap(oldmm, mm);
56992 retval = 0;
56993@@ -431,14 +476,6 @@ out:
56994 flush_tlb_mm(oldmm);
56995 up_write(&oldmm->mmap_sem);
56996 return retval;
56997-fail_nomem_anon_vma_fork:
56998- mpol_put(pol);
56999-fail_nomem_policy:
57000- kmem_cache_free(vm_area_cachep, tmp);
57001-fail_nomem:
57002- retval = -ENOMEM;
57003- vm_unacct_memory(charge);
57004- goto out;
57005 }
57006
57007 static inline int mm_alloc_pgd(struct mm_struct * mm)
57008@@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
57009 spin_unlock(&fs->lock);
57010 return -EAGAIN;
57011 }
57012- fs->users++;
57013+ atomic_inc(&fs->users);
57014 spin_unlock(&fs->lock);
57015 return 0;
57016 }
57017 tsk->fs = copy_fs_struct(fs);
57018 if (!tsk->fs)
57019 return -ENOMEM;
57020+ gr_set_chroot_entries(tsk, &tsk->fs->root);
57021 return 0;
57022 }
57023
57024@@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
57025 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
57026 #endif
57027 retval = -EAGAIN;
57028+
57029+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
57030+
57031 if (atomic_read(&p->real_cred->user->processes) >=
57032 task_rlimit(p, RLIMIT_NPROC)) {
57033- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
57034- p->real_cred->user != INIT_USER)
57035+ if (p->real_cred->user != INIT_USER &&
57036+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
57037 goto bad_fork_free;
57038 }
57039
57040@@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
57041 goto bad_fork_free_pid;
57042 }
57043
57044+ gr_copy_label(p);
57045+
57046 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
57047 /*
57048 * Clear TID on mm_release()?
57049@@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
57050 bad_fork_free:
57051 free_task(p);
57052 fork_out:
57053+ gr_log_forkfail(retval);
57054+
57055 return ERR_PTR(retval);
57056 }
57057
57058@@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
57059 if (clone_flags & CLONE_PARENT_SETTID)
57060 put_user(nr, parent_tidptr);
57061
57062+ gr_handle_brute_check();
57063+
57064 if (clone_flags & CLONE_VFORK) {
57065 p->vfork_done = &vfork;
57066 init_completion(&vfork);
57067@@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
57068 return 0;
57069
57070 /* don't need lock here; in the worst case we'll do useless copy */
57071- if (fs->users == 1)
57072+ if (atomic_read(&fs->users) == 1)
57073 return 0;
57074
57075 *new_fsp = copy_fs_struct(fs);
57076@@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
57077 fs = current->fs;
57078 spin_lock(&fs->lock);
57079 current->fs = new_fs;
57080- if (--fs->users)
57081+ gr_set_chroot_entries(current, &current->fs->root);
57082+ if (atomic_dec_return(&fs->users))
57083 new_fs = NULL;
57084 else
57085 new_fs = fs;
57086diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
57087--- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
57088+++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
57089@@ -54,6 +54,7 @@
57090 #include <linux/mount.h>
57091 #include <linux/pagemap.h>
57092 #include <linux/syscalls.h>
57093+#include <linux/ptrace.h>
57094 #include <linux/signal.h>
57095 #include <linux/module.h>
57096 #include <linux/magic.h>
57097@@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
57098 struct page *page, *page_head;
57099 int err;
57100
57101+#ifdef CONFIG_PAX_SEGMEXEC
57102+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
57103+ return -EFAULT;
57104+#endif
57105+
57106 /*
57107 * The futex address must be "naturally" aligned.
57108 */
57109@@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
57110 struct futex_q q = futex_q_init;
57111 int ret;
57112
57113+ pax_track_stack();
57114+
57115 if (!bitset)
57116 return -EINVAL;
57117 q.bitset = bitset;
57118@@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
57119 struct futex_q q = futex_q_init;
57120 int res, ret;
57121
57122+ pax_track_stack();
57123+
57124 if (!bitset)
57125 return -EINVAL;
57126
57127@@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57128 {
57129 struct robust_list_head __user *head;
57130 unsigned long ret;
57131+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57132 const struct cred *cred = current_cred(), *pcred;
57133+#endif
57134
57135 if (!futex_cmpxchg_enabled)
57136 return -ENOSYS;
57137@@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57138 if (!p)
57139 goto err_unlock;
57140 ret = -EPERM;
57141+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57142+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
57143+ goto err_unlock;
57144+#else
57145 pcred = __task_cred(p);
57146 /* If victim is in different user_ns, then uids are not
57147 comparable, so we must have CAP_SYS_PTRACE */
57148@@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57149 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57150 goto err_unlock;
57151 ok:
57152+#endif
57153 head = p->robust_list;
57154 rcu_read_unlock();
57155 }
57156@@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57157 {
57158 u32 curval;
57159 int i;
57160+ mm_segment_t oldfs;
57161
57162 /*
57163 * This will fail and we want it. Some arch implementations do
57164@@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57165 * implementation, the non-functional ones will return
57166 * -ENOSYS.
57167 */
57168+ oldfs = get_fs();
57169+ set_fs(USER_DS);
57170 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57171 futex_cmpxchg_enabled = 1;
57172+ set_fs(oldfs);
57173
57174 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57175 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57176diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57177--- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57178+++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57179@@ -10,6 +10,7 @@
57180 #include <linux/compat.h>
57181 #include <linux/nsproxy.h>
57182 #include <linux/futex.h>
57183+#include <linux/ptrace.h>
57184
57185 #include <asm/uaccess.h>
57186
57187@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57188 {
57189 struct compat_robust_list_head __user *head;
57190 unsigned long ret;
57191- const struct cred *cred = current_cred(), *pcred;
57192+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57193+ const struct cred *cred = current_cred();
57194+ const struct cred *pcred;
57195+#endif
57196
57197 if (!futex_cmpxchg_enabled)
57198 return -ENOSYS;
57199@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57200 if (!p)
57201 goto err_unlock;
57202 ret = -EPERM;
57203+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57204+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
57205+ goto err_unlock;
57206+#else
57207 pcred = __task_cred(p);
57208 /* If victim is in different user_ns, then uids are not
57209 comparable, so we must have CAP_SYS_PTRACE */
57210@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57211 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57212 goto err_unlock;
57213 ok:
57214+#endif
57215 head = p->compat_robust_list;
57216 rcu_read_unlock();
57217 }
57218diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57219--- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57220+++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57221@@ -102,11 +102,6 @@ void gcov_enable_events(void)
57222 }
57223
57224 #ifdef CONFIG_MODULES
57225-static inline int within(void *addr, void *start, unsigned long size)
57226-{
57227- return ((addr >= start) && (addr < start + size));
57228-}
57229-
57230 /* Update list and generate events when modules are unloaded. */
57231 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57232 void *data)
57233@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57234 prev = NULL;
57235 /* Remove entries located in module from linked list. */
57236 for (info = gcov_info_head; info; info = info->next) {
57237- if (within(info, mod->module_core, mod->core_size)) {
57238+ if (within_module_core_rw((unsigned long)info, mod)) {
57239 if (prev)
57240 prev->next = info->next;
57241 else
57242diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57243--- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57244+++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57245@@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57246 local_irq_restore(flags);
57247 }
57248
57249-static void run_hrtimer_softirq(struct softirq_action *h)
57250+static void run_hrtimer_softirq(void)
57251 {
57252 hrtimer_peek_ahead_timers();
57253 }
57254diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57255--- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57256+++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57257@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57258 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57259 int ret = 0;
57260
57261+ if (!desc)
57262+ return -EINVAL;
57263+
57264 /* wakeup-capable irqs can be shared between drivers that
57265 * don't need to have the same sleep mode behaviors.
57266 */
57267diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57268--- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57269+++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57270@@ -49,6 +49,17 @@ void jump_label_unlock(void)
57271 mutex_unlock(&jump_label_mutex);
57272 }
57273
57274+static void jump_label_swap(void *a, void *b, int size)
57275+{
57276+ struct jump_entry t;
57277+
57278+ t = *(struct jump_entry *)a;
57279+ pax_open_kernel();
57280+ *(struct jump_entry *)a = *(struct jump_entry *)b;
57281+ *(struct jump_entry *)b = t;
57282+ pax_close_kernel();
57283+}
57284+
57285 static int jump_label_cmp(const void *a, const void *b)
57286 {
57287 const struct jump_entry *jea = a;
57288@@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57289
57290 size = (((unsigned long)stop - (unsigned long)start)
57291 / sizeof(struct jump_entry));
57292- sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57293+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57294 }
57295
57296 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57297@@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57298 count = e_module->nr_entries;
57299 iter = e_module->table;
57300 while (count--) {
57301- if (within_module_init(iter->code, mod))
57302+ if (within_module_init(iter->code, mod)) {
57303+ pax_open_kernel();
57304 iter->key = 0;
57305+ pax_close_kernel();
57306+ }
57307 iter++;
57308 }
57309 }
57310diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57311--- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57312+++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57313@@ -11,6 +11,9 @@
57314 * Changed the compression method from stem compression to "table lookup"
57315 * compression (see scripts/kallsyms.c for a more complete description)
57316 */
57317+#ifdef CONFIG_GRKERNSEC_HIDESYM
57318+#define __INCLUDED_BY_HIDESYM 1
57319+#endif
57320 #include <linux/kallsyms.h>
57321 #include <linux/module.h>
57322 #include <linux/init.h>
57323@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57324
57325 static inline int is_kernel_inittext(unsigned long addr)
57326 {
57327+ if (system_state != SYSTEM_BOOTING)
57328+ return 0;
57329+
57330 if (addr >= (unsigned long)_sinittext
57331 && addr <= (unsigned long)_einittext)
57332 return 1;
57333 return 0;
57334 }
57335
57336+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57337+#ifdef CONFIG_MODULES
57338+static inline int is_module_text(unsigned long addr)
57339+{
57340+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57341+ return 1;
57342+
57343+ addr = ktla_ktva(addr);
57344+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57345+}
57346+#else
57347+static inline int is_module_text(unsigned long addr)
57348+{
57349+ return 0;
57350+}
57351+#endif
57352+#endif
57353+
57354 static inline int is_kernel_text(unsigned long addr)
57355 {
57356 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57357@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57358
57359 static inline int is_kernel(unsigned long addr)
57360 {
57361+
57362+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57363+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
57364+ return 1;
57365+
57366+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57367+#else
57368 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57369+#endif
57370+
57371 return 1;
57372 return in_gate_area_no_mm(addr);
57373 }
57374
57375 static int is_ksym_addr(unsigned long addr)
57376 {
57377+
57378+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57379+ if (is_module_text(addr))
57380+ return 0;
57381+#endif
57382+
57383 if (all_var)
57384 return is_kernel(addr);
57385
57386@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57387
57388 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57389 {
57390- iter->name[0] = '\0';
57391 iter->nameoff = get_symbol_offset(new_pos);
57392 iter->pos = new_pos;
57393 }
57394@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57395 {
57396 struct kallsym_iter *iter = m->private;
57397
57398+#ifdef CONFIG_GRKERNSEC_HIDESYM
57399+ if (current_uid())
57400+ return 0;
57401+#endif
57402+
57403 /* Some debugging symbols have no name. Ignore them. */
57404 if (!iter->name[0])
57405 return 0;
57406@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57407 struct kallsym_iter *iter;
57408 int ret;
57409
57410- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57411+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57412 if (!iter)
57413 return -ENOMEM;
57414 reset_iter(iter, 0);
57415diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57416--- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57417+++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57418@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57419 * If module auto-loading support is disabled then this function
57420 * becomes a no-operation.
57421 */
57422-int __request_module(bool wait, const char *fmt, ...)
57423+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57424 {
57425- va_list args;
57426 char module_name[MODULE_NAME_LEN];
57427 unsigned int max_modprobes;
57428 int ret;
57429- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57430+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57431 static char *envp[] = { "HOME=/",
57432 "TERM=linux",
57433 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57434@@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57435 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57436 static int kmod_loop_msg;
57437
57438- va_start(args, fmt);
57439- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57440- va_end(args);
57441+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57442 if (ret >= MODULE_NAME_LEN)
57443 return -ENAMETOOLONG;
57444
57445@@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57446 if (ret)
57447 return ret;
57448
57449+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57450+ if (!current_uid()) {
57451+ /* hack to workaround consolekit/udisks stupidity */
57452+ read_lock(&tasklist_lock);
57453+ if (!strcmp(current->comm, "mount") &&
57454+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57455+ read_unlock(&tasklist_lock);
57456+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57457+ return -EPERM;
57458+ }
57459+ read_unlock(&tasklist_lock);
57460+ }
57461+#endif
57462+
57463 /* If modprobe needs a service that is in a module, we get a recursive
57464 * loop. Limit the number of running kmod threads to max_threads/2 or
57465 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57466@@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57467 atomic_dec(&kmod_concurrent);
57468 return ret;
57469 }
57470+
57471+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57472+{
57473+ va_list args;
57474+ int ret;
57475+
57476+ va_start(args, fmt);
57477+ ret = ____request_module(wait, module_param, fmt, args);
57478+ va_end(args);
57479+
57480+ return ret;
57481+}
57482+
57483+int __request_module(bool wait, const char *fmt, ...)
57484+{
57485+ va_list args;
57486+ int ret;
57487+
57488+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57489+ if (current_uid()) {
57490+ char module_param[MODULE_NAME_LEN];
57491+
57492+ memset(module_param, 0, sizeof(module_param));
57493+
57494+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57495+
57496+ va_start(args, fmt);
57497+ ret = ____request_module(wait, module_param, fmt, args);
57498+ va_end(args);
57499+
57500+ return ret;
57501+ }
57502+#endif
57503+
57504+ va_start(args, fmt);
57505+ ret = ____request_module(wait, NULL, fmt, args);
57506+ va_end(args);
57507+
57508+ return ret;
57509+}
57510+
57511 EXPORT_SYMBOL(__request_module);
57512 #endif /* CONFIG_MODULES */
57513
57514diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57515--- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57516+++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57517@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57518 * kernel image and loaded module images reside. This is required
57519 * so x86_64 can correctly handle the %rip-relative fixups.
57520 */
57521- kip->insns = module_alloc(PAGE_SIZE);
57522+ kip->insns = module_alloc_exec(PAGE_SIZE);
57523 if (!kip->insns) {
57524 kfree(kip);
57525 return NULL;
57526@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57527 */
57528 if (!list_is_singular(&kip->list)) {
57529 list_del(&kip->list);
57530- module_free(NULL, kip->insns);
57531+ module_free_exec(NULL, kip->insns);
57532 kfree(kip);
57533 }
57534 return 1;
57535@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57536 {
57537 int i, err = 0;
57538 unsigned long offset = 0, size = 0;
57539- char *modname, namebuf[128];
57540+ char *modname, namebuf[KSYM_NAME_LEN];
57541 const char *symbol_name;
57542 void *addr;
57543 struct kprobe_blackpoint *kb;
57544@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57545 const char *sym = NULL;
57546 unsigned int i = *(loff_t *) v;
57547 unsigned long offset = 0;
57548- char *modname, namebuf[128];
57549+ char *modname, namebuf[KSYM_NAME_LEN];
57550
57551 head = &kprobe_table[i];
57552 preempt_disable();
57553diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57554--- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57555+++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57556@@ -571,6 +571,10 @@ static int static_obj(void *obj)
57557 end = (unsigned long) &_end,
57558 addr = (unsigned long) obj;
57559
57560+#ifdef CONFIG_PAX_KERNEXEC
57561+ start = ktla_ktva(start);
57562+#endif
57563+
57564 /*
57565 * static variable?
57566 */
57567@@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57568 if (!static_obj(lock->key)) {
57569 debug_locks_off();
57570 printk("INFO: trying to register non-static key.\n");
57571+ printk("lock:%pS key:%pS.\n", lock, lock->key);
57572 printk("the code is fine but needs lockdep annotation.\n");
57573 printk("turning off the locking correctness validator.\n");
57574 dump_stack();
57575@@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57576 if (!class)
57577 return 0;
57578 }
57579- atomic_inc((atomic_t *)&class->ops);
57580+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57581 if (very_verbose(class)) {
57582 printk("\nacquire class [%p] %s", class->key, class->name);
57583 if (class->name_version > 1)
57584diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57585--- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57586+++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57587@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57588
57589 static void print_name(struct seq_file *m, struct lock_class *class)
57590 {
57591- char str[128];
57592+ char str[KSYM_NAME_LEN];
57593 const char *name = class->name;
57594
57595 if (!name) {
57596diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57597--- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57598+++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57599@@ -57,6 +57,7 @@
57600 #include <linux/kmemleak.h>
57601 #include <linux/jump_label.h>
57602 #include <linux/pfn.h>
57603+#include <linux/grsecurity.h>
57604
57605 #define CREATE_TRACE_POINTS
57606 #include <trace/events/module.h>
57607@@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57608
57609 /* Bounds of module allocation, for speeding __module_address.
57610 * Protected by module_mutex. */
57611-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57612+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57613+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57614
57615 int register_module_notifier(struct notifier_block * nb)
57616 {
57617@@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57618 return true;
57619
57620 list_for_each_entry_rcu(mod, &modules, list) {
57621- struct symsearch arr[] = {
57622+ struct symsearch modarr[] = {
57623 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57624 NOT_GPL_ONLY, false },
57625 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57626@@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57627 #endif
57628 };
57629
57630- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57631+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57632 return true;
57633 }
57634 return false;
57635@@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57636 static int percpu_modalloc(struct module *mod,
57637 unsigned long size, unsigned long align)
57638 {
57639- if (align > PAGE_SIZE) {
57640+ if (align-1 >= PAGE_SIZE) {
57641 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57642 mod->name, align, PAGE_SIZE);
57643 align = PAGE_SIZE;
57644@@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57645 */
57646 #ifdef CONFIG_SYSFS
57647
57648-#ifdef CONFIG_KALLSYMS
57649+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57650 static inline bool sect_empty(const Elf_Shdr *sect)
57651 {
57652 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57653@@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57654 {
57655 unsigned long total_pages;
57656
57657- if (mod->module_core == module_region) {
57658+ if (mod->module_core_rx == module_region) {
57659 /* Set core as NX+RW */
57660- total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57661- set_memory_nx((unsigned long)mod->module_core, total_pages);
57662- set_memory_rw((unsigned long)mod->module_core, total_pages);
57663+ total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57664+ set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57665+ set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57666
57667- } else if (mod->module_init == module_region) {
57668+ } else if (mod->module_init_rx == module_region) {
57669 /* Set init as NX+RW */
57670- total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57671- set_memory_nx((unsigned long)mod->module_init, total_pages);
57672- set_memory_rw((unsigned long)mod->module_init, total_pages);
57673+ total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57674+ set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57675+ set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57676 }
57677 }
57678
57679@@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57680
57681 mutex_lock(&module_mutex);
57682 list_for_each_entry_rcu(mod, &modules, list) {
57683- if ((mod->module_core) && (mod->core_text_size)) {
57684- set_page_attributes(mod->module_core,
57685- mod->module_core + mod->core_text_size,
57686+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57687+ set_page_attributes(mod->module_core_rx,
57688+ mod->module_core_rx + mod->core_size_rx,
57689 set_memory_rw);
57690 }
57691- if ((mod->module_init) && (mod->init_text_size)) {
57692- set_page_attributes(mod->module_init,
57693- mod->module_init + mod->init_text_size,
57694+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57695+ set_page_attributes(mod->module_init_rx,
57696+ mod->module_init_rx + mod->init_size_rx,
57697 set_memory_rw);
57698 }
57699 }
57700@@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57701
57702 mutex_lock(&module_mutex);
57703 list_for_each_entry_rcu(mod, &modules, list) {
57704- if ((mod->module_core) && (mod->core_text_size)) {
57705- set_page_attributes(mod->module_core,
57706- mod->module_core + mod->core_text_size,
57707+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57708+ set_page_attributes(mod->module_core_rx,
57709+ mod->module_core_rx + mod->core_size_rx,
57710 set_memory_ro);
57711 }
57712- if ((mod->module_init) && (mod->init_text_size)) {
57713- set_page_attributes(mod->module_init,
57714- mod->module_init + mod->init_text_size,
57715+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57716+ set_page_attributes(mod->module_init_rx,
57717+ mod->module_init_rx + mod->init_size_rx,
57718 set_memory_ro);
57719 }
57720 }
57721@@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57722 destroy_params(mod->kp, mod->num_kp);
57723
57724 /* This may be NULL, but that's OK */
57725- unset_section_ro_nx(mod, mod->module_init);
57726- module_free(mod, mod->module_init);
57727+ unset_section_ro_nx(mod, mod->module_init_rx);
57728+ module_free(mod, mod->module_init_rw);
57729+ module_free_exec(mod, mod->module_init_rx);
57730 kfree(mod->args);
57731 percpu_modfree(mod);
57732
57733 /* Free lock-classes: */
57734- lockdep_free_key_range(mod->module_core, mod->core_size);
57735+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57736+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57737
57738 /* Finally, free the core (containing the module structure) */
57739- unset_section_ro_nx(mod, mod->module_core);
57740- module_free(mod, mod->module_core);
57741+ unset_section_ro_nx(mod, mod->module_core_rx);
57742+ module_free_exec(mod, mod->module_core_rx);
57743+ module_free(mod, mod->module_core_rw);
57744
57745 #ifdef CONFIG_MPU
57746 update_protections(current->mm);
57747@@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57748 unsigned int i;
57749 int ret = 0;
57750 const struct kernel_symbol *ksym;
57751+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57752+ int is_fs_load = 0;
57753+ int register_filesystem_found = 0;
57754+ char *p;
57755+
57756+ p = strstr(mod->args, "grsec_modharden_fs");
57757+ if (p) {
57758+ char *endptr = p + strlen("grsec_modharden_fs");
57759+ /* copy \0 as well */
57760+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57761+ is_fs_load = 1;
57762+ }
57763+#endif
57764
57765 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57766 const char *name = info->strtab + sym[i].st_name;
57767
57768+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57769+ /* it's a real shame this will never get ripped and copied
57770+ upstream! ;(
57771+ */
57772+ if (is_fs_load && !strcmp(name, "register_filesystem"))
57773+ register_filesystem_found = 1;
57774+#endif
57775+
57776 switch (sym[i].st_shndx) {
57777 case SHN_COMMON:
57778 /* We compiled with -fno-common. These are not
57779@@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57780 ksym = resolve_symbol_wait(mod, info, name);
57781 /* Ok if resolved. */
57782 if (ksym && !IS_ERR(ksym)) {
57783+ pax_open_kernel();
57784 sym[i].st_value = ksym->value;
57785+ pax_close_kernel();
57786 break;
57787 }
57788
57789@@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57790 secbase = (unsigned long)mod_percpu(mod);
57791 else
57792 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57793+ pax_open_kernel();
57794 sym[i].st_value += secbase;
57795+ pax_close_kernel();
57796 break;
57797 }
57798 }
57799
57800+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57801+ if (is_fs_load && !register_filesystem_found) {
57802+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57803+ ret = -EPERM;
57804+ }
57805+#endif
57806+
57807 return ret;
57808 }
57809
57810@@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57811 || s->sh_entsize != ~0UL
57812 || strstarts(sname, ".init"))
57813 continue;
57814- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57815+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57816+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57817+ else
57818+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57819 DEBUGP("\t%s\n", name);
57820 }
57821- switch (m) {
57822- case 0: /* executable */
57823- mod->core_size = debug_align(mod->core_size);
57824- mod->core_text_size = mod->core_size;
57825- break;
57826- case 1: /* RO: text and ro-data */
57827- mod->core_size = debug_align(mod->core_size);
57828- mod->core_ro_size = mod->core_size;
57829- break;
57830- case 3: /* whole core */
57831- mod->core_size = debug_align(mod->core_size);
57832- break;
57833- }
57834 }
57835
57836 DEBUGP("Init section allocation order:\n");
57837@@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57838 || s->sh_entsize != ~0UL
57839 || !strstarts(sname, ".init"))
57840 continue;
57841- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57842- | INIT_OFFSET_MASK);
57843+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57844+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57845+ else
57846+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57847+ s->sh_entsize |= INIT_OFFSET_MASK;
57848 DEBUGP("\t%s\n", sname);
57849 }
57850- switch (m) {
57851- case 0: /* executable */
57852- mod->init_size = debug_align(mod->init_size);
57853- mod->init_text_size = mod->init_size;
57854- break;
57855- case 1: /* RO: text and ro-data */
57856- mod->init_size = debug_align(mod->init_size);
57857- mod->init_ro_size = mod->init_size;
57858- break;
57859- case 3: /* whole init */
57860- mod->init_size = debug_align(mod->init_size);
57861- break;
57862- }
57863 }
57864 }
57865
57866@@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57867
57868 /* Put symbol section at end of init part of module. */
57869 symsect->sh_flags |= SHF_ALLOC;
57870- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57871+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57872 info->index.sym) | INIT_OFFSET_MASK;
57873 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57874
57875@@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57876 }
57877
57878 /* Append room for core symbols at end of core part. */
57879- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57880- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57881+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57882+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57883
57884 /* Put string table section at end of init part of module. */
57885 strsect->sh_flags |= SHF_ALLOC;
57886- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57887+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57888 info->index.str) | INIT_OFFSET_MASK;
57889 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57890
57891 /* Append room for core symbols' strings at end of core part. */
57892- info->stroffs = mod->core_size;
57893+ info->stroffs = mod->core_size_rx;
57894 __set_bit(0, info->strmap);
57895- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57896+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57897 }
57898
57899 static void add_kallsyms(struct module *mod, const struct load_info *info)
57900@@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57901 /* Make sure we get permanent strtab: don't use info->strtab. */
57902 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57903
57904+ pax_open_kernel();
57905+
57906 /* Set types up while we still have access to sections. */
57907 for (i = 0; i < mod->num_symtab; i++)
57908 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57909
57910- mod->core_symtab = dst = mod->module_core + info->symoffs;
57911+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57912 src = mod->symtab;
57913 *dst = *src;
57914 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57915@@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57916 }
57917 mod->core_num_syms = ndst;
57918
57919- mod->core_strtab = s = mod->module_core + info->stroffs;
57920+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57921 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57922 if (test_bit(i, info->strmap))
57923 *++s = mod->strtab[i];
57924+
57925+ pax_close_kernel();
57926 }
57927 #else
57928 static inline void layout_symtab(struct module *mod, struct load_info *info)
57929@@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57930 ddebug_remove_module(debug->modname);
57931 }
57932
57933-static void *module_alloc_update_bounds(unsigned long size)
57934+static void *module_alloc_update_bounds_rw(unsigned long size)
57935 {
57936 void *ret = module_alloc(size);
57937
57938 if (ret) {
57939 mutex_lock(&module_mutex);
57940 /* Update module bounds. */
57941- if ((unsigned long)ret < module_addr_min)
57942- module_addr_min = (unsigned long)ret;
57943- if ((unsigned long)ret + size > module_addr_max)
57944- module_addr_max = (unsigned long)ret + size;
57945+ if ((unsigned long)ret < module_addr_min_rw)
57946+ module_addr_min_rw = (unsigned long)ret;
57947+ if ((unsigned long)ret + size > module_addr_max_rw)
57948+ module_addr_max_rw = (unsigned long)ret + size;
57949+ mutex_unlock(&module_mutex);
57950+ }
57951+ return ret;
57952+}
57953+
57954+static void *module_alloc_update_bounds_rx(unsigned long size)
57955+{
57956+ void *ret = module_alloc_exec(size);
57957+
57958+ if (ret) {
57959+ mutex_lock(&module_mutex);
57960+ /* Update module bounds. */
57961+ if ((unsigned long)ret < module_addr_min_rx)
57962+ module_addr_min_rx = (unsigned long)ret;
57963+ if ((unsigned long)ret + size > module_addr_max_rx)
57964+ module_addr_max_rx = (unsigned long)ret + size;
57965 mutex_unlock(&module_mutex);
57966 }
57967 return ret;
57968@@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57969 void *ptr;
57970
57971 /* Do the allocs. */
57972- ptr = module_alloc_update_bounds(mod->core_size);
57973+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57974 /*
57975 * The pointer to this block is stored in the module structure
57976 * which is inside the block. Just mark it as not being a
57977@@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
57978 if (!ptr)
57979 return -ENOMEM;
57980
57981- memset(ptr, 0, mod->core_size);
57982- mod->module_core = ptr;
57983+ memset(ptr, 0, mod->core_size_rw);
57984+ mod->module_core_rw = ptr;
57985
57986- ptr = module_alloc_update_bounds(mod->init_size);
57987+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57988 /*
57989 * The pointer to this block is stored in the module structure
57990 * which is inside the block. This block doesn't need to be
57991 * scanned as it contains data and code that will be freed
57992 * after the module is initialized.
57993 */
57994- kmemleak_ignore(ptr);
57995- if (!ptr && mod->init_size) {
57996- module_free(mod, mod->module_core);
57997+ kmemleak_not_leak(ptr);
57998+ if (!ptr && mod->init_size_rw) {
57999+ module_free(mod, mod->module_core_rw);
58000 return -ENOMEM;
58001 }
58002- memset(ptr, 0, mod->init_size);
58003- mod->module_init = ptr;
58004+ memset(ptr, 0, mod->init_size_rw);
58005+ mod->module_init_rw = ptr;
58006+
58007+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
58008+ kmemleak_not_leak(ptr);
58009+ if (!ptr) {
58010+ module_free(mod, mod->module_init_rw);
58011+ module_free(mod, mod->module_core_rw);
58012+ return -ENOMEM;
58013+ }
58014+
58015+ pax_open_kernel();
58016+ memset(ptr, 0, mod->core_size_rx);
58017+ pax_close_kernel();
58018+ mod->module_core_rx = ptr;
58019+
58020+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
58021+ kmemleak_not_leak(ptr);
58022+ if (!ptr && mod->init_size_rx) {
58023+ module_free_exec(mod, mod->module_core_rx);
58024+ module_free(mod, mod->module_init_rw);
58025+ module_free(mod, mod->module_core_rw);
58026+ return -ENOMEM;
58027+ }
58028+
58029+ pax_open_kernel();
58030+ memset(ptr, 0, mod->init_size_rx);
58031+ pax_close_kernel();
58032+ mod->module_init_rx = ptr;
58033
58034 /* Transfer each section which specifies SHF_ALLOC */
58035 DEBUGP("final section addresses:\n");
58036@@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
58037 if (!(shdr->sh_flags & SHF_ALLOC))
58038 continue;
58039
58040- if (shdr->sh_entsize & INIT_OFFSET_MASK)
58041- dest = mod->module_init
58042- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58043- else
58044- dest = mod->module_core + shdr->sh_entsize;
58045+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
58046+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58047+ dest = mod->module_init_rw
58048+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58049+ else
58050+ dest = mod->module_init_rx
58051+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58052+ } else {
58053+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58054+ dest = mod->module_core_rw + shdr->sh_entsize;
58055+ else
58056+ dest = mod->module_core_rx + shdr->sh_entsize;
58057+ }
58058+
58059+ if (shdr->sh_type != SHT_NOBITS) {
58060+
58061+#ifdef CONFIG_PAX_KERNEXEC
58062+#ifdef CONFIG_X86_64
58063+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
58064+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
58065+#endif
58066+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
58067+ pax_open_kernel();
58068+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58069+ pax_close_kernel();
58070+ } else
58071+#endif
58072
58073- if (shdr->sh_type != SHT_NOBITS)
58074 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58075+ }
58076 /* Update sh_addr to point to copy in image. */
58077- shdr->sh_addr = (unsigned long)dest;
58078+
58079+#ifdef CONFIG_PAX_KERNEXEC
58080+ if (shdr->sh_flags & SHF_EXECINSTR)
58081+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
58082+ else
58083+#endif
58084+
58085+ shdr->sh_addr = (unsigned long)dest;
58086 DEBUGP("\t0x%lx %s\n",
58087 shdr->sh_addr, info->secstrings + shdr->sh_name);
58088 }
58089@@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
58090 * Do it before processing of module parameters, so the module
58091 * can provide parameter accessor functions of its own.
58092 */
58093- if (mod->module_init)
58094- flush_icache_range((unsigned long)mod->module_init,
58095- (unsigned long)mod->module_init
58096- + mod->init_size);
58097- flush_icache_range((unsigned long)mod->module_core,
58098- (unsigned long)mod->module_core + mod->core_size);
58099+ if (mod->module_init_rx)
58100+ flush_icache_range((unsigned long)mod->module_init_rx,
58101+ (unsigned long)mod->module_init_rx
58102+ + mod->init_size_rx);
58103+ flush_icache_range((unsigned long)mod->module_core_rx,
58104+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
58105
58106 set_fs(old_fs);
58107 }
58108@@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
58109 {
58110 kfree(info->strmap);
58111 percpu_modfree(mod);
58112- module_free(mod, mod->module_init);
58113- module_free(mod, mod->module_core);
58114+ module_free_exec(mod, mod->module_init_rx);
58115+ module_free_exec(mod, mod->module_core_rx);
58116+ module_free(mod, mod->module_init_rw);
58117+ module_free(mod, mod->module_core_rw);
58118 }
58119
58120 static int post_relocation(struct module *mod, const struct load_info *info)
58121@@ -2748,9 +2843,38 @@ static struct module *load_module(void _
58122 if (err)
58123 goto free_unload;
58124
58125+ /* Now copy in args */
58126+ mod->args = strndup_user(uargs, ~0UL >> 1);
58127+ if (IS_ERR(mod->args)) {
58128+ err = PTR_ERR(mod->args);
58129+ goto free_unload;
58130+ }
58131+
58132 /* Set up MODINFO_ATTR fields */
58133 setup_modinfo(mod, &info);
58134
58135+#ifdef CONFIG_GRKERNSEC_MODHARDEN
58136+ {
58137+ char *p, *p2;
58138+
58139+ if (strstr(mod->args, "grsec_modharden_netdev")) {
58140+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58141+ err = -EPERM;
58142+ goto free_modinfo;
58143+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58144+ p += strlen("grsec_modharden_normal");
58145+ p2 = strstr(p, "_");
58146+ if (p2) {
58147+ *p2 = '\0';
58148+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58149+ *p2 = '_';
58150+ }
58151+ err = -EPERM;
58152+ goto free_modinfo;
58153+ }
58154+ }
58155+#endif
58156+
58157 /* Fix up syms, so that st_value is a pointer to location. */
58158 err = simplify_symbols(mod, &info);
58159 if (err < 0)
58160@@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58161
58162 flush_module_icache(mod);
58163
58164- /* Now copy in args */
58165- mod->args = strndup_user(uargs, ~0UL >> 1);
58166- if (IS_ERR(mod->args)) {
58167- err = PTR_ERR(mod->args);
58168- goto free_arch_cleanup;
58169- }
58170-
58171 /* Mark state as coming so strong_try_module_get() ignores us. */
58172 mod->state = MODULE_STATE_COMING;
58173
58174@@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58175 unlock:
58176 mutex_unlock(&module_mutex);
58177 synchronize_sched();
58178- kfree(mod->args);
58179- free_arch_cleanup:
58180 module_arch_cleanup(mod);
58181 free_modinfo:
58182 free_modinfo(mod);
58183+ kfree(mod->args);
58184 free_unload:
58185 module_unload_free(mod);
58186 free_module:
58187@@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58188 MODULE_STATE_COMING, mod);
58189
58190 /* Set RO and NX regions for core */
58191- set_section_ro_nx(mod->module_core,
58192- mod->core_text_size,
58193- mod->core_ro_size,
58194- mod->core_size);
58195+ set_section_ro_nx(mod->module_core_rx,
58196+ mod->core_size_rx,
58197+ mod->core_size_rx,
58198+ mod->core_size_rx);
58199
58200 /* Set RO and NX regions for init */
58201- set_section_ro_nx(mod->module_init,
58202- mod->init_text_size,
58203- mod->init_ro_size,
58204- mod->init_size);
58205+ set_section_ro_nx(mod->module_init_rx,
58206+ mod->init_size_rx,
58207+ mod->init_size_rx,
58208+ mod->init_size_rx);
58209
58210 do_mod_ctors(mod);
58211 /* Start the module */
58212@@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58213 mod->symtab = mod->core_symtab;
58214 mod->strtab = mod->core_strtab;
58215 #endif
58216- unset_section_ro_nx(mod, mod->module_init);
58217- module_free(mod, mod->module_init);
58218- mod->module_init = NULL;
58219- mod->init_size = 0;
58220- mod->init_text_size = 0;
58221+ unset_section_ro_nx(mod, mod->module_init_rx);
58222+ module_free(mod, mod->module_init_rw);
58223+ module_free_exec(mod, mod->module_init_rx);
58224+ mod->module_init_rw = NULL;
58225+ mod->module_init_rx = NULL;
58226+ mod->init_size_rw = 0;
58227+ mod->init_size_rx = 0;
58228 mutex_unlock(&module_mutex);
58229
58230 return 0;
58231@@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58232 unsigned long nextval;
58233
58234 /* At worse, next value is at end of module */
58235- if (within_module_init(addr, mod))
58236- nextval = (unsigned long)mod->module_init+mod->init_text_size;
58237+ if (within_module_init_rx(addr, mod))
58238+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58239+ else if (within_module_init_rw(addr, mod))
58240+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58241+ else if (within_module_core_rx(addr, mod))
58242+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58243+ else if (within_module_core_rw(addr, mod))
58244+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58245 else
58246- nextval = (unsigned long)mod->module_core+mod->core_text_size;
58247+ return NULL;
58248
58249 /* Scan for closest preceding symbol, and next symbol. (ELF
58250 starts real symbols at 1). */
58251@@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58252 char buf[8];
58253
58254 seq_printf(m, "%s %u",
58255- mod->name, mod->init_size + mod->core_size);
58256+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58257 print_unload_info(m, mod);
58258
58259 /* Informative for users. */
58260@@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58261 mod->state == MODULE_STATE_COMING ? "Loading":
58262 "Live");
58263 /* Used by oprofile and other similar tools. */
58264- seq_printf(m, " 0x%pK", mod->module_core);
58265+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58266
58267 /* Taints info */
58268 if (mod->taints)
58269@@ -3260,7 +3384,17 @@ static const struct file_operations proc
58270
58271 static int __init proc_modules_init(void)
58272 {
58273+#ifndef CONFIG_GRKERNSEC_HIDESYM
58274+#ifdef CONFIG_GRKERNSEC_PROC_USER
58275+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58276+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58277+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58278+#else
58279 proc_create("modules", 0, NULL, &proc_modules_operations);
58280+#endif
58281+#else
58282+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58283+#endif
58284 return 0;
58285 }
58286 module_init(proc_modules_init);
58287@@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58288 {
58289 struct module *mod;
58290
58291- if (addr < module_addr_min || addr > module_addr_max)
58292+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58293+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
58294 return NULL;
58295
58296 list_for_each_entry_rcu(mod, &modules, list)
58297- if (within_module_core(addr, mod)
58298- || within_module_init(addr, mod))
58299+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
58300 return mod;
58301 return NULL;
58302 }
58303@@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58304 */
58305 struct module *__module_text_address(unsigned long addr)
58306 {
58307- struct module *mod = __module_address(addr);
58308+ struct module *mod;
58309+
58310+#ifdef CONFIG_X86_32
58311+ addr = ktla_ktva(addr);
58312+#endif
58313+
58314+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58315+ return NULL;
58316+
58317+ mod = __module_address(addr);
58318+
58319 if (mod) {
58320 /* Make sure it's within the text section. */
58321- if (!within(addr, mod->module_init, mod->init_text_size)
58322- && !within(addr, mod->module_core, mod->core_text_size))
58323+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58324 mod = NULL;
58325 }
58326 return mod;
58327diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58328--- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58329+++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58330@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58331 */
58332
58333 for (;;) {
58334- struct thread_info *owner;
58335+ struct task_struct *owner;
58336
58337 /*
58338 * If we own the BKL, then don't spin. The owner of
58339@@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58340 spin_lock_mutex(&lock->wait_lock, flags);
58341
58342 debug_mutex_lock_common(lock, &waiter);
58343- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58344+ debug_mutex_add_waiter(lock, &waiter, task);
58345
58346 /* add waiting tasks to the end of the waitqueue (FIFO): */
58347 list_add_tail(&waiter.list, &lock->wait_list);
58348@@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58349 * TASK_UNINTERRUPTIBLE case.)
58350 */
58351 if (unlikely(signal_pending_state(state, task))) {
58352- mutex_remove_waiter(lock, &waiter,
58353- task_thread_info(task));
58354+ mutex_remove_waiter(lock, &waiter, task);
58355 mutex_release(&lock->dep_map, 1, ip);
58356 spin_unlock_mutex(&lock->wait_lock, flags);
58357
58358@@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58359 done:
58360 lock_acquired(&lock->dep_map, ip);
58361 /* got the lock - rejoice! */
58362- mutex_remove_waiter(lock, &waiter, current_thread_info());
58363+ mutex_remove_waiter(lock, &waiter, task);
58364 mutex_set_owner(lock);
58365
58366 /* set it to 0 if there are no waiters left: */
58367diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58368--- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58369+++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58370@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58371 }
58372
58373 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58374- struct thread_info *ti)
58375+ struct task_struct *task)
58376 {
58377 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58378
58379 /* Mark the current thread as blocked on the lock: */
58380- ti->task->blocked_on = waiter;
58381+ task->blocked_on = waiter;
58382 }
58383
58384 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58385- struct thread_info *ti)
58386+ struct task_struct *task)
58387 {
58388 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58389- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58390- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58391- ti->task->blocked_on = NULL;
58392+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
58393+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58394+ task->blocked_on = NULL;
58395
58396 list_del_init(&waiter->list);
58397 waiter->task = NULL;
58398@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58399 return;
58400
58401 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58402- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58403+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
58404 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58405 mutex_clear_owner(lock);
58406 }
58407diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58408--- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58409+++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58410@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58411 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58412 extern void debug_mutex_add_waiter(struct mutex *lock,
58413 struct mutex_waiter *waiter,
58414- struct thread_info *ti);
58415+ struct task_struct *task);
58416 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58417- struct thread_info *ti);
58418+ struct task_struct *task);
58419 extern void debug_mutex_unlock(struct mutex *lock);
58420 extern void debug_mutex_init(struct mutex *lock, const char *name,
58421 struct lock_class_key *key);
58422
58423 static inline void mutex_set_owner(struct mutex *lock)
58424 {
58425- lock->owner = current_thread_info();
58426+ lock->owner = current;
58427 }
58428
58429 static inline void mutex_clear_owner(struct mutex *lock)
58430diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58431--- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58432+++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58433@@ -19,7 +19,7 @@
58434 #ifdef CONFIG_SMP
58435 static inline void mutex_set_owner(struct mutex *lock)
58436 {
58437- lock->owner = current_thread_info();
58438+ lock->owner = current;
58439 }
58440
58441 static inline void mutex_clear_owner(struct mutex *lock)
58442diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58443--- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58444+++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58445@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58446 padata->pd = pd;
58447 padata->cb_cpu = cb_cpu;
58448
58449- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58450- atomic_set(&pd->seq_nr, -1);
58451+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58452+ atomic_set_unchecked(&pd->seq_nr, -1);
58453
58454- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58455+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58456
58457 target_cpu = padata_cpu_hash(padata);
58458 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58459@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58460 padata_init_pqueues(pd);
58461 padata_init_squeues(pd);
58462 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58463- atomic_set(&pd->seq_nr, -1);
58464+ atomic_set_unchecked(&pd->seq_nr, -1);
58465 atomic_set(&pd->reorder_objects, 0);
58466 atomic_set(&pd->refcnt, 0);
58467 pd->pinst = pinst;
58468diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58469--- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58470+++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58471@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58472 const char *board;
58473
58474 printk(KERN_WARNING "------------[ cut here ]------------\n");
58475- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58476+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58477 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58478 if (board)
58479 printk(KERN_WARNING "Hardware name: %s\n", board);
58480@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58481 */
58482 void __stack_chk_fail(void)
58483 {
58484- panic("stack-protector: Kernel stack is corrupted in: %p\n",
58485+ dump_stack();
58486+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58487 __builtin_return_address(0));
58488 }
58489 EXPORT_SYMBOL(__stack_chk_fail);
58490diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58491--- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58492+++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58493@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58494 return 0;
58495 }
58496
58497-static atomic64_t perf_event_id;
58498+static atomic64_unchecked_t perf_event_id;
58499
58500 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58501 enum event_type_t event_type);
58502@@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58503
58504 static inline u64 perf_event_count(struct perf_event *event)
58505 {
58506- return local64_read(&event->count) + atomic64_read(&event->child_count);
58507+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58508 }
58509
58510 static u64 perf_event_read(struct perf_event *event)
58511@@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58512 mutex_lock(&event->child_mutex);
58513 total += perf_event_read(event);
58514 *enabled += event->total_time_enabled +
58515- atomic64_read(&event->child_total_time_enabled);
58516+ atomic64_read_unchecked(&event->child_total_time_enabled);
58517 *running += event->total_time_running +
58518- atomic64_read(&event->child_total_time_running);
58519+ atomic64_read_unchecked(&event->child_total_time_running);
58520
58521 list_for_each_entry(child, &event->child_list, child_list) {
58522 total += perf_event_read(child);
58523@@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58524 userpg->offset -= local64_read(&event->hw.prev_count);
58525
58526 userpg->time_enabled = event->total_time_enabled +
58527- atomic64_read(&event->child_total_time_enabled);
58528+ atomic64_read_unchecked(&event->child_total_time_enabled);
58529
58530 userpg->time_running = event->total_time_running +
58531- atomic64_read(&event->child_total_time_running);
58532+ atomic64_read_unchecked(&event->child_total_time_running);
58533
58534 barrier();
58535 ++userpg->lock;
58536@@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58537 values[n++] = perf_event_count(event);
58538 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58539 values[n++] = enabled +
58540- atomic64_read(&event->child_total_time_enabled);
58541+ atomic64_read_unchecked(&event->child_total_time_enabled);
58542 }
58543 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58544 values[n++] = running +
58545- atomic64_read(&event->child_total_time_running);
58546+ atomic64_read_unchecked(&event->child_total_time_running);
58547 }
58548 if (read_format & PERF_FORMAT_ID)
58549 values[n++] = primary_event_id(event);
58550@@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58551 event->parent = parent_event;
58552
58553 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58554- event->id = atomic64_inc_return(&perf_event_id);
58555+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
58556
58557 event->state = PERF_EVENT_STATE_INACTIVE;
58558
58559@@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58560 /*
58561 * Add back the child's count to the parent's count:
58562 */
58563- atomic64_add(child_val, &parent_event->child_count);
58564- atomic64_add(child_event->total_time_enabled,
58565+ atomic64_add_unchecked(child_val, &parent_event->child_count);
58566+ atomic64_add_unchecked(child_event->total_time_enabled,
58567 &parent_event->child_total_time_enabled);
58568- atomic64_add(child_event->total_time_running,
58569+ atomic64_add_unchecked(child_event->total_time_running,
58570 &parent_event->child_total_time_running);
58571
58572 /*
58573diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58574--- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58575+++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58576@@ -33,6 +33,7 @@
58577 #include <linux/rculist.h>
58578 #include <linux/bootmem.h>
58579 #include <linux/hash.h>
58580+#include <linux/security.h>
58581 #include <linux/pid_namespace.h>
58582 #include <linux/init_task.h>
58583 #include <linux/syscalls.h>
58584@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58585
58586 int pid_max = PID_MAX_DEFAULT;
58587
58588-#define RESERVED_PIDS 300
58589+#define RESERVED_PIDS 500
58590
58591 int pid_max_min = RESERVED_PIDS + 1;
58592 int pid_max_max = PID_MAX_LIMIT;
58593@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58594 */
58595 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58596 {
58597+ struct task_struct *task;
58598+
58599 rcu_lockdep_assert(rcu_read_lock_held());
58600- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58601+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58602+
58603+ if (gr_pid_is_chrooted(task))
58604+ return NULL;
58605+
58606+ return task;
58607 }
58608
58609 struct task_struct *find_task_by_vpid(pid_t vnr)
58610@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58611 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58612 }
58613
58614+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58615+{
58616+ rcu_lockdep_assert(rcu_read_lock_held());
58617+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58618+}
58619+
58620 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58621 {
58622 struct pid *pid;
58623diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58624--- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58625+++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58626@@ -6,6 +6,7 @@
58627 #include <linux/posix-timers.h>
58628 #include <linux/errno.h>
58629 #include <linux/math64.h>
58630+#include <linux/security.h>
58631 #include <asm/uaccess.h>
58632 #include <linux/kernel_stat.h>
58633 #include <trace/events/timer.h>
58634@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58635
58636 static __init int init_posix_cpu_timers(void)
58637 {
58638- struct k_clock process = {
58639+ static struct k_clock process = {
58640 .clock_getres = process_cpu_clock_getres,
58641 .clock_get = process_cpu_clock_get,
58642 .timer_create = process_cpu_timer_create,
58643 .nsleep = process_cpu_nsleep,
58644 .nsleep_restart = process_cpu_nsleep_restart,
58645 };
58646- struct k_clock thread = {
58647+ static struct k_clock thread = {
58648 .clock_getres = thread_cpu_clock_getres,
58649 .clock_get = thread_cpu_clock_get,
58650 .timer_create = thread_cpu_timer_create,
58651diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58652--- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58653+++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58654@@ -43,6 +43,7 @@
58655 #include <linux/idr.h>
58656 #include <linux/posix-clock.h>
58657 #include <linux/posix-timers.h>
58658+#include <linux/grsecurity.h>
58659 #include <linux/syscalls.h>
58660 #include <linux/wait.h>
58661 #include <linux/workqueue.h>
58662@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58663 * which we beg off on and pass to do_sys_settimeofday().
58664 */
58665
58666-static struct k_clock posix_clocks[MAX_CLOCKS];
58667+static struct k_clock *posix_clocks[MAX_CLOCKS];
58668
58669 /*
58670 * These ones are defined below.
58671@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58672 */
58673 static __init int init_posix_timers(void)
58674 {
58675- struct k_clock clock_realtime = {
58676+ static struct k_clock clock_realtime = {
58677 .clock_getres = hrtimer_get_res,
58678 .clock_get = posix_clock_realtime_get,
58679 .clock_set = posix_clock_realtime_set,
58680@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58681 .timer_get = common_timer_get,
58682 .timer_del = common_timer_del,
58683 };
58684- struct k_clock clock_monotonic = {
58685+ static struct k_clock clock_monotonic = {
58686 .clock_getres = hrtimer_get_res,
58687 .clock_get = posix_ktime_get_ts,
58688 .nsleep = common_nsleep,
58689@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58690 .timer_get = common_timer_get,
58691 .timer_del = common_timer_del,
58692 };
58693- struct k_clock clock_monotonic_raw = {
58694+ static struct k_clock clock_monotonic_raw = {
58695 .clock_getres = hrtimer_get_res,
58696 .clock_get = posix_get_monotonic_raw,
58697 };
58698- struct k_clock clock_realtime_coarse = {
58699+ static struct k_clock clock_realtime_coarse = {
58700 .clock_getres = posix_get_coarse_res,
58701 .clock_get = posix_get_realtime_coarse,
58702 };
58703- struct k_clock clock_monotonic_coarse = {
58704+ static struct k_clock clock_monotonic_coarse = {
58705 .clock_getres = posix_get_coarse_res,
58706 .clock_get = posix_get_monotonic_coarse,
58707 };
58708- struct k_clock clock_boottime = {
58709+ static struct k_clock clock_boottime = {
58710 .clock_getres = hrtimer_get_res,
58711 .clock_get = posix_get_boottime,
58712 .nsleep = common_nsleep,
58713@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58714 .timer_del = common_timer_del,
58715 };
58716
58717+ pax_track_stack();
58718+
58719 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58720 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58721 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58722@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58723 return;
58724 }
58725
58726- posix_clocks[clock_id] = *new_clock;
58727+ posix_clocks[clock_id] = new_clock;
58728 }
58729 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58730
58731@@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58732 return (id & CLOCKFD_MASK) == CLOCKFD ?
58733 &clock_posix_dynamic : &clock_posix_cpu;
58734
58735- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58736+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58737 return NULL;
58738- return &posix_clocks[id];
58739+ return posix_clocks[id];
58740 }
58741
58742 static int common_timer_create(struct k_itimer *new_timer)
58743@@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58744 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58745 return -EFAULT;
58746
58747+ /* only the CLOCK_REALTIME clock can be set, all other clocks
58748+ have their clock_set fptr set to a nosettime dummy function
58749+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58750+ call common_clock_set, which calls do_sys_settimeofday, which
58751+ we hook
58752+ */
58753+
58754 return kc->clock_set(which_clock, &new_tp);
58755 }
58756
58757diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58758--- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58759+++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58760@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58761 .enable_mask = SYSRQ_ENABLE_BOOT,
58762 };
58763
58764-static int pm_sysrq_init(void)
58765+static int __init pm_sysrq_init(void)
58766 {
58767 register_sysrq_key('o', &sysrq_poweroff_op);
58768 return 0;
58769diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58770--- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58771+++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58772@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58773 u64 elapsed_csecs64;
58774 unsigned int elapsed_csecs;
58775 bool wakeup = false;
58776+ bool timedout = false;
58777
58778 do_gettimeofday(&start);
58779
58780@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58781
58782 while (true) {
58783 todo = 0;
58784+ if (time_after(jiffies, end_time))
58785+ timedout = true;
58786 read_lock(&tasklist_lock);
58787 do_each_thread(g, p) {
58788 if (frozen(p) || !freezable(p))
58789@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58790 * try_to_stop() after schedule() in ptrace/signal
58791 * stop sees TIF_FREEZE.
58792 */
58793- if (!task_is_stopped_or_traced(p) &&
58794- !freezer_should_skip(p))
58795+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58796 todo++;
58797+ if (timedout) {
58798+ printk(KERN_ERR "Task refusing to freeze:\n");
58799+ sched_show_task(p);
58800+ }
58801+ }
58802 } while_each_thread(g, p);
58803 read_unlock(&tasklist_lock);
58804
58805@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58806 todo += wq_busy;
58807 }
58808
58809- if (!todo || time_after(jiffies, end_time))
58810+ if (!todo || timedout)
58811 break;
58812
58813 if (pm_wakeup_pending()) {
58814diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58815--- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58816+++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58817@@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58818 if (from_file && type != SYSLOG_ACTION_OPEN)
58819 return 0;
58820
58821+#ifdef CONFIG_GRKERNSEC_DMESG
58822+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58823+ return -EPERM;
58824+#endif
58825+
58826 if (syslog_action_restricted(type)) {
58827 if (capable(CAP_SYSLOG))
58828 return 0;
58829 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58830 if (capable(CAP_SYS_ADMIN)) {
58831- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58832+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58833 "but no CAP_SYSLOG (deprecated).\n");
58834 return 0;
58835 }
58836diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58837--- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58838+++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58839@@ -39,7 +39,7 @@ struct profile_hit {
58840 /* Oprofile timer tick hook */
58841 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58842
58843-static atomic_t *prof_buffer;
58844+static atomic_unchecked_t *prof_buffer;
58845 static unsigned long prof_len, prof_shift;
58846
58847 int prof_on __read_mostly;
58848@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58849 hits[i].pc = 0;
58850 continue;
58851 }
58852- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58853+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58854 hits[i].hits = hits[i].pc = 0;
58855 }
58856 }
58857@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58858 * Add the current hit(s) and flush the write-queue out
58859 * to the global buffer:
58860 */
58861- atomic_add(nr_hits, &prof_buffer[pc]);
58862+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58863 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58864- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58865+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58866 hits[i].pc = hits[i].hits = 0;
58867 }
58868 out:
58869@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58870 if (prof_on != type || !prof_buffer)
58871 return;
58872 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58873- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58874+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58875 }
58876 #endif /* !CONFIG_SMP */
58877 EXPORT_SYMBOL_GPL(profile_hits);
58878@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58879 return -EFAULT;
58880 buf++; p++; count--; read++;
58881 }
58882- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58883+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58884 if (copy_to_user(buf, (void *)pnt, count))
58885 return -EFAULT;
58886 read += count;
58887@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58888 }
58889 #endif
58890 profile_discard_flip_buffers();
58891- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58892+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58893 return count;
58894 }
58895
58896diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58897--- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58898+++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58899@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58900 return ret;
58901 }
58902
58903-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58904+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58905+ unsigned int log)
58906 {
58907 const struct cred *cred = current_cred(), *tcred;
58908
58909@@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58910 cred->gid == tcred->sgid &&
58911 cred->gid == tcred->gid))
58912 goto ok;
58913- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58914+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58915+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58916 goto ok;
58917 rcu_read_unlock();
58918 return -EPERM;
58919@@ -152,7 +154,9 @@ ok:
58920 smp_rmb();
58921 if (task->mm)
58922 dumpable = get_dumpable(task->mm);
58923- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58924+ if (!dumpable &&
58925+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58926+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58927 return -EPERM;
58928
58929 return security_ptrace_access_check(task, mode);
58930@@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58931 {
58932 int err;
58933 task_lock(task);
58934- err = __ptrace_may_access(task, mode);
58935+ err = __ptrace_may_access(task, mode, 0);
58936+ task_unlock(task);
58937+ return !err;
58938+}
58939+
58940+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58941+{
58942+ int err;
58943+ task_lock(task);
58944+ err = __ptrace_may_access(task, mode, 1);
58945 task_unlock(task);
58946 return !err;
58947 }
58948@@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58949 goto out;
58950
58951 task_lock(task);
58952- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58953+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58954 task_unlock(task);
58955 if (retval)
58956 goto unlock_creds;
58957@@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58958 goto unlock_tasklist;
58959
58960 task->ptrace = PT_PTRACED;
58961- if (task_ns_capable(task, CAP_SYS_PTRACE))
58962+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58963 task->ptrace |= PT_PTRACE_CAP;
58964
58965 __ptrace_link(task, current);
58966@@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58967 {
58968 int copied = 0;
58969
58970+ pax_track_stack();
58971+
58972 while (len > 0) {
58973 char buf[128];
58974 int this_len, retval;
58975@@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
58976 break;
58977 return -EIO;
58978 }
58979- if (copy_to_user(dst, buf, retval))
58980+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58981 return -EFAULT;
58982 copied += retval;
58983 src += retval;
58984@@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
58985 {
58986 int copied = 0;
58987
58988+ pax_track_stack();
58989+
58990 while (len > 0) {
58991 char buf[128];
58992 int this_len, retval;
58993@@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
58994 {
58995 int ret = -EIO;
58996 siginfo_t siginfo;
58997- void __user *datavp = (void __user *) data;
58998+ void __user *datavp = (__force void __user *) data;
58999 unsigned long __user *datalp = datavp;
59000
59001+ pax_track_stack();
59002+
59003 switch (request) {
59004 case PTRACE_PEEKTEXT:
59005 case PTRACE_PEEKDATA:
59006@@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
59007 goto out;
59008 }
59009
59010+ if (gr_handle_ptrace(child, request)) {
59011+ ret = -EPERM;
59012+ goto out_put_task_struct;
59013+ }
59014+
59015 if (request == PTRACE_ATTACH) {
59016 ret = ptrace_attach(child);
59017 /*
59018 * Some architectures need to do book-keeping after
59019 * a ptrace attach.
59020 */
59021- if (!ret)
59022+ if (!ret) {
59023 arch_ptrace_attach(child);
59024+ gr_audit_ptrace(child);
59025+ }
59026 goto out_put_task_struct;
59027 }
59028
59029@@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
59030 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
59031 if (copied != sizeof(tmp))
59032 return -EIO;
59033- return put_user(tmp, (unsigned long __user *)data);
59034+ return put_user(tmp, (__force unsigned long __user *)data);
59035 }
59036
59037 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
59038@@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
59039 siginfo_t siginfo;
59040 int ret;
59041
59042+ pax_track_stack();
59043+
59044 switch (request) {
59045 case PTRACE_PEEKTEXT:
59046 case PTRACE_PEEKDATA:
59047@@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
59048 goto out;
59049 }
59050
59051+ if (gr_handle_ptrace(child, request)) {
59052+ ret = -EPERM;
59053+ goto out_put_task_struct;
59054+ }
59055+
59056 if (request == PTRACE_ATTACH) {
59057 ret = ptrace_attach(child);
59058 /*
59059 * Some architectures need to do book-keeping after
59060 * a ptrace attach.
59061 */
59062- if (!ret)
59063+ if (!ret) {
59064 arch_ptrace_attach(child);
59065+ gr_audit_ptrace(child);
59066+ }
59067 goto out_put_task_struct;
59068 }
59069
59070diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
59071--- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
59072+++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
59073@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
59074 { 0 };
59075 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
59076 { 0 };
59077-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59078-static atomic_t n_rcu_torture_alloc;
59079-static atomic_t n_rcu_torture_alloc_fail;
59080-static atomic_t n_rcu_torture_free;
59081-static atomic_t n_rcu_torture_mberror;
59082-static atomic_t n_rcu_torture_error;
59083+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59084+static atomic_unchecked_t n_rcu_torture_alloc;
59085+static atomic_unchecked_t n_rcu_torture_alloc_fail;
59086+static atomic_unchecked_t n_rcu_torture_free;
59087+static atomic_unchecked_t n_rcu_torture_mberror;
59088+static atomic_unchecked_t n_rcu_torture_error;
59089 static long n_rcu_torture_boost_ktrerror;
59090 static long n_rcu_torture_boost_rterror;
59091 static long n_rcu_torture_boost_allocerror;
59092@@ -225,11 +225,11 @@ rcu_torture_alloc(void)
59093
59094 spin_lock_bh(&rcu_torture_lock);
59095 if (list_empty(&rcu_torture_freelist)) {
59096- atomic_inc(&n_rcu_torture_alloc_fail);
59097+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
59098 spin_unlock_bh(&rcu_torture_lock);
59099 return NULL;
59100 }
59101- atomic_inc(&n_rcu_torture_alloc);
59102+ atomic_inc_unchecked(&n_rcu_torture_alloc);
59103 p = rcu_torture_freelist.next;
59104 list_del_init(p);
59105 spin_unlock_bh(&rcu_torture_lock);
59106@@ -242,7 +242,7 @@ rcu_torture_alloc(void)
59107 static void
59108 rcu_torture_free(struct rcu_torture *p)
59109 {
59110- atomic_inc(&n_rcu_torture_free);
59111+ atomic_inc_unchecked(&n_rcu_torture_free);
59112 spin_lock_bh(&rcu_torture_lock);
59113 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
59114 spin_unlock_bh(&rcu_torture_lock);
59115@@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
59116 i = rp->rtort_pipe_count;
59117 if (i > RCU_TORTURE_PIPE_LEN)
59118 i = RCU_TORTURE_PIPE_LEN;
59119- atomic_inc(&rcu_torture_wcount[i]);
59120+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
59121 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59122 rp->rtort_mbtest = 0;
59123 rcu_torture_free(rp);
59124@@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
59125 i = rp->rtort_pipe_count;
59126 if (i > RCU_TORTURE_PIPE_LEN)
59127 i = RCU_TORTURE_PIPE_LEN;
59128- atomic_inc(&rcu_torture_wcount[i]);
59129+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
59130 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59131 rp->rtort_mbtest = 0;
59132 list_del(&rp->rtort_free);
59133@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
59134 i = old_rp->rtort_pipe_count;
59135 if (i > RCU_TORTURE_PIPE_LEN)
59136 i = RCU_TORTURE_PIPE_LEN;
59137- atomic_inc(&rcu_torture_wcount[i]);
59138+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
59139 old_rp->rtort_pipe_count++;
59140 cur_ops->deferred_free(old_rp);
59141 }
59142@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
59143 return;
59144 }
59145 if (p->rtort_mbtest == 0)
59146- atomic_inc(&n_rcu_torture_mberror);
59147+ atomic_inc_unchecked(&n_rcu_torture_mberror);
59148 spin_lock(&rand_lock);
59149 cur_ops->read_delay(&rand);
59150 n_rcu_torture_timers++;
59151@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59152 continue;
59153 }
59154 if (p->rtort_mbtest == 0)
59155- atomic_inc(&n_rcu_torture_mberror);
59156+ atomic_inc_unchecked(&n_rcu_torture_mberror);
59157 cur_ops->read_delay(&rand);
59158 preempt_disable();
59159 pipe_count = p->rtort_pipe_count;
59160@@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59161 rcu_torture_current,
59162 rcu_torture_current_version,
59163 list_empty(&rcu_torture_freelist),
59164- atomic_read(&n_rcu_torture_alloc),
59165- atomic_read(&n_rcu_torture_alloc_fail),
59166- atomic_read(&n_rcu_torture_free),
59167- atomic_read(&n_rcu_torture_mberror),
59168+ atomic_read_unchecked(&n_rcu_torture_alloc),
59169+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59170+ atomic_read_unchecked(&n_rcu_torture_free),
59171+ atomic_read_unchecked(&n_rcu_torture_mberror),
59172 n_rcu_torture_boost_ktrerror,
59173 n_rcu_torture_boost_rterror,
59174 n_rcu_torture_boost_allocerror,
59175@@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59176 n_rcu_torture_boost_failure,
59177 n_rcu_torture_boosts,
59178 n_rcu_torture_timers);
59179- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59180+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59181 n_rcu_torture_boost_ktrerror != 0 ||
59182 n_rcu_torture_boost_rterror != 0 ||
59183 n_rcu_torture_boost_allocerror != 0 ||
59184@@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59185 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59186 if (i > 1) {
59187 cnt += sprintf(&page[cnt], "!!! ");
59188- atomic_inc(&n_rcu_torture_error);
59189+ atomic_inc_unchecked(&n_rcu_torture_error);
59190 WARN_ON_ONCE(1);
59191 }
59192 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59193@@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59194 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59196 cnt += sprintf(&page[cnt], " %d",
59197- atomic_read(&rcu_torture_wcount[i]));
59198+ atomic_read_unchecked(&rcu_torture_wcount[i]));
59199 }
59200 cnt += sprintf(&page[cnt], "\n");
59201 if (cur_ops->stats)
59202@@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59203
59204 if (cur_ops->cleanup)
59205 cur_ops->cleanup();
59206- if (atomic_read(&n_rcu_torture_error))
59207+ if (atomic_read_unchecked(&n_rcu_torture_error))
59208 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59209 else
59210 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59211@@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59212
59213 rcu_torture_current = NULL;
59214 rcu_torture_current_version = 0;
59215- atomic_set(&n_rcu_torture_alloc, 0);
59216- atomic_set(&n_rcu_torture_alloc_fail, 0);
59217- atomic_set(&n_rcu_torture_free, 0);
59218- atomic_set(&n_rcu_torture_mberror, 0);
59219- atomic_set(&n_rcu_torture_error, 0);
59220+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59221+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59222+ atomic_set_unchecked(&n_rcu_torture_free, 0);
59223+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59224+ atomic_set_unchecked(&n_rcu_torture_error, 0);
59225 n_rcu_torture_boost_ktrerror = 0;
59226 n_rcu_torture_boost_rterror = 0;
59227 n_rcu_torture_boost_allocerror = 0;
59228@@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59229 n_rcu_torture_boost_failure = 0;
59230 n_rcu_torture_boosts = 0;
59231 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59232- atomic_set(&rcu_torture_wcount[i], 0);
59233+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59234 for_each_possible_cpu(cpu) {
59235 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59236 per_cpu(rcu_torture_count, cpu)[i] = 0;
59237diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59238--- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59239+++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59240@@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59241 /*
59242 * Do softirq processing for the current CPU.
59243 */
59244-static void rcu_process_callbacks(struct softirq_action *unused)
59245+static void rcu_process_callbacks(void)
59246 {
59247 /*
59248 * Memory references from any prior RCU read-side critical sections
59249diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59250--- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59251+++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59252@@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59253
59254 /* Clean up and exit. */
59255 smp_mb(); /* ensure expedited GP seen before counter increment. */
59256- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59257+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59258 unlock_mb_ret:
59259 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59260 mb_ret:
59261@@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59262
59263 #else /* #ifndef CONFIG_SMP */
59264
59265-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59266-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59267+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59268+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59269
59270 static int synchronize_sched_expedited_cpu_stop(void *data)
59271 {
59272@@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59273 int firstsnap, s, snap, trycount = 0;
59274
59275 /* Note that atomic_inc_return() implies full memory barrier. */
59276- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59277+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59278 get_online_cpus();
59279
59280 /*
59281@@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59282 }
59283
59284 /* Check to see if someone else did our work for us. */
59285- s = atomic_read(&sync_sched_expedited_done);
59286+ s = atomic_read_unchecked(&sync_sched_expedited_done);
59287 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59288 smp_mb(); /* ensure test happens before caller kfree */
59289 return;
59290@@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59291 * grace period works for us.
59292 */
59293 get_online_cpus();
59294- snap = atomic_read(&sync_sched_expedited_started) - 1;
59295+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59296 smp_mb(); /* ensure read is before try_stop_cpus(). */
59297 }
59298
59299@@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59300 * than we did beat us to the punch.
59301 */
59302 do {
59303- s = atomic_read(&sync_sched_expedited_done);
59304+ s = atomic_read_unchecked(&sync_sched_expedited_done);
59305 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59306 smp_mb(); /* ensure test happens before caller kfree */
59307 break;
59308 }
59309- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59310+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59311
59312 put_online_cpus();
59313 }
59314diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59315--- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59316+++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59317@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59318 };
59319 ssize_t ret;
59320
59321+ pax_track_stack();
59322+
59323 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59324 return 0;
59325 if (splice_grow_spd(pipe, &spd))
59326diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59327--- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59328+++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59329@@ -133,8 +133,18 @@ static const struct file_operations proc
59330
59331 static int __init ioresources_init(void)
59332 {
59333+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59334+#ifdef CONFIG_GRKERNSEC_PROC_USER
59335+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59336+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59337+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59338+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59339+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59340+#endif
59341+#else
59342 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59343 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59344+#endif
59345 return 0;
59346 }
59347 __initcall(ioresources_init);
59348diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59349--- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59350+++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59351@@ -20,7 +20,7 @@
59352 #define MAX_RT_TEST_MUTEXES 8
59353
59354 static spinlock_t rttest_lock;
59355-static atomic_t rttest_event;
59356+static atomic_unchecked_t rttest_event;
59357
59358 struct test_thread_data {
59359 int opcode;
59360@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59361
59362 case RTTEST_LOCKCONT:
59363 td->mutexes[td->opdata] = 1;
59364- td->event = atomic_add_return(1, &rttest_event);
59365+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59366 return 0;
59367
59368 case RTTEST_RESET:
59369@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59370 return 0;
59371
59372 case RTTEST_RESETEVENT:
59373- atomic_set(&rttest_event, 0);
59374+ atomic_set_unchecked(&rttest_event, 0);
59375 return 0;
59376
59377 default:
59378@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59379 return ret;
59380
59381 td->mutexes[id] = 1;
59382- td->event = atomic_add_return(1, &rttest_event);
59383+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59384 rt_mutex_lock(&mutexes[id]);
59385- td->event = atomic_add_return(1, &rttest_event);
59386+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59387 td->mutexes[id] = 4;
59388 return 0;
59389
59390@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59391 return ret;
59392
59393 td->mutexes[id] = 1;
59394- td->event = atomic_add_return(1, &rttest_event);
59395+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59396 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59397- td->event = atomic_add_return(1, &rttest_event);
59398+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59399 td->mutexes[id] = ret ? 0 : 4;
59400 return ret ? -EINTR : 0;
59401
59402@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59403 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59404 return ret;
59405
59406- td->event = atomic_add_return(1, &rttest_event);
59407+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59408 rt_mutex_unlock(&mutexes[id]);
59409- td->event = atomic_add_return(1, &rttest_event);
59410+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59411 td->mutexes[id] = 0;
59412 return 0;
59413
59414@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59415 break;
59416
59417 td->mutexes[dat] = 2;
59418- td->event = atomic_add_return(1, &rttest_event);
59419+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59420 break;
59421
59422 default:
59423@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59424 return;
59425
59426 td->mutexes[dat] = 3;
59427- td->event = atomic_add_return(1, &rttest_event);
59428+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59429 break;
59430
59431 case RTTEST_LOCKNOWAIT:
59432@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59433 return;
59434
59435 td->mutexes[dat] = 1;
59436- td->event = atomic_add_return(1, &rttest_event);
59437+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59438 return;
59439
59440 default:
59441diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59442--- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59443+++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59444@@ -7,7 +7,7 @@
59445
59446 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59447 static struct autogroup autogroup_default;
59448-static atomic_t autogroup_seq_nr;
59449+static atomic_unchecked_t autogroup_seq_nr;
59450
59451 static void __init autogroup_init(struct task_struct *init_task)
59452 {
59453@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59454
59455 kref_init(&ag->kref);
59456 init_rwsem(&ag->lock);
59457- ag->id = atomic_inc_return(&autogroup_seq_nr);
59458+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59459 ag->tg = tg;
59460 #ifdef CONFIG_RT_GROUP_SCHED
59461 /*
59462diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59463--- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59464+++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59465@@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59466 struct rq *rq;
59467 int cpu;
59468
59469+ pax_track_stack();
59470+
59471 need_resched:
59472 preempt_disable();
59473 cpu = smp_processor_id();
59474@@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59475 * Look out! "owner" is an entirely speculative pointer
59476 * access and not reliable.
59477 */
59478-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59479+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59480 {
59481 unsigned int cpu;
59482 struct rq *rq;
59483@@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59484 * DEBUG_PAGEALLOC could have unmapped it if
59485 * the mutex owner just released it and exited.
59486 */
59487- if (probe_kernel_address(&owner->cpu, cpu))
59488+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59489 return 0;
59490 #else
59491- cpu = owner->cpu;
59492+ cpu = task_thread_info(owner)->cpu;
59493 #endif
59494
59495 /*
59496@@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59497 /*
59498 * Is that owner really running on that cpu?
59499 */
59500- if (task_thread_info(rq->curr) != owner || need_resched())
59501+ if (rq->curr != owner || need_resched())
59502 return 0;
59503
59504 arch_mutex_cpu_relax();
59505@@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59506 /* convert nice value [19,-20] to rlimit style value [1,40] */
59507 int nice_rlim = 20 - nice;
59508
59509+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59510+
59511 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59512 capable(CAP_SYS_NICE));
59513 }
59514@@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59515 if (nice > 19)
59516 nice = 19;
59517
59518- if (increment < 0 && !can_nice(current, nice))
59519+ if (increment < 0 && (!can_nice(current, nice) ||
59520+ gr_handle_chroot_nice()))
59521 return -EPERM;
59522
59523 retval = security_task_setnice(current, nice);
59524@@ -4957,6 +4962,7 @@ recheck:
59525 unsigned long rlim_rtprio =
59526 task_rlimit(p, RLIMIT_RTPRIO);
59527
59528+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59529 /* can't set/change the rt policy */
59530 if (policy != p->policy && !rlim_rtprio)
59531 return -EPERM;
59532@@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59533 long power;
59534 int weight;
59535
59536- WARN_ON(!sd || !sd->groups);
59537+ BUG_ON(!sd || !sd->groups);
59538
59539 if (cpu != group_first_cpu(sd->groups))
59540 return;
59541diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59542--- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59543+++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59544@@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59545 * run_rebalance_domains is triggered when needed from the scheduler tick.
59546 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59547 */
59548-static void run_rebalance_domains(struct softirq_action *h)
59549+static void run_rebalance_domains(void)
59550 {
59551 int this_cpu = smp_processor_id();
59552 struct rq *this_rq = cpu_rq(this_cpu);
59553diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59554--- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59555+++ linux-2.6.39.4/kernel/signal.c 2011-08-16 21:16:33.000000000 -0400
59556@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59557
59558 int print_fatal_signals __read_mostly;
59559
59560-static void __user *sig_handler(struct task_struct *t, int sig)
59561+static __sighandler_t sig_handler(struct task_struct *t, int sig)
59562 {
59563 return t->sighand->action[sig - 1].sa.sa_handler;
59564 }
59565
59566-static int sig_handler_ignored(void __user *handler, int sig)
59567+static int sig_handler_ignored(__sighandler_t handler, int sig)
59568 {
59569 /* Is it explicitly or implicitly ignored? */
59570 return handler == SIG_IGN ||
59571@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59572 static int sig_task_ignored(struct task_struct *t, int sig,
59573 int from_ancestor_ns)
59574 {
59575- void __user *handler;
59576+ __sighandler_t handler;
59577
59578 handler = sig_handler(t, sig);
59579
59580@@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59581 atomic_inc(&user->sigpending);
59582 rcu_read_unlock();
59583
59584+ if (!override_rlimit)
59585+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59586+
59587 if (override_rlimit ||
59588 atomic_read(&user->sigpending) <=
59589 task_rlimit(t, RLIMIT_SIGPENDING)) {
59590@@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59591
59592 int unhandled_signal(struct task_struct *tsk, int sig)
59593 {
59594- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59595+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59596 if (is_global_init(tsk))
59597 return 1;
59598 if (handler != SIG_IGN && handler != SIG_DFL)
59599@@ -693,6 +696,13 @@ static int check_kill_permission(int sig
59600 }
59601 }
59602
59603+ /* allow glibc communication via tgkill to other threads in our
59604+ thread group */
59605+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59606+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59607+ && gr_handle_signal(t, sig))
59608+ return -EPERM;
59609+
59610 return security_task_kill(t, info, sig, 0);
59611 }
59612
59613@@ -1041,7 +1051,7 @@ __group_send_sig_info(int sig, struct si
59614 return send_signal(sig, info, p, 1);
59615 }
59616
59617-static int
59618+int
59619 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59620 {
59621 return send_signal(sig, info, t, 0);
59622@@ -1078,6 +1088,7 @@ force_sig_info(int sig, struct siginfo *
59623 unsigned long int flags;
59624 int ret, blocked, ignored;
59625 struct k_sigaction *action;
59626+ int is_unhandled = 0;
59627
59628 spin_lock_irqsave(&t->sighand->siglock, flags);
59629 action = &t->sighand->action[sig-1];
59630@@ -1092,9 +1103,18 @@ force_sig_info(int sig, struct siginfo *
59631 }
59632 if (action->sa.sa_handler == SIG_DFL)
59633 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59634+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59635+ is_unhandled = 1;
59636 ret = specific_send_sig_info(sig, info, t);
59637 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59638
59639+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
59640+ normal operation */
59641+ if (is_unhandled) {
59642+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59643+ gr_handle_crash(t, sig);
59644+ }
59645+
59646 return ret;
59647 }
59648
59649@@ -1153,8 +1173,11 @@ int group_send_sig_info(int sig, struct
59650 ret = check_kill_permission(sig, info, p);
59651 rcu_read_unlock();
59652
59653- if (!ret && sig)
59654+ if (!ret && sig) {
59655 ret = do_send_sig_info(sig, info, p, true);
59656+ if (!ret)
59657+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59658+ }
59659
59660 return ret;
59661 }
59662@@ -1718,6 +1741,8 @@ void ptrace_notify(int exit_code)
59663 {
59664 siginfo_t info;
59665
59666+ pax_track_stack();
59667+
59668 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59669
59670 memset(&info, 0, sizeof info);
59671@@ -2393,7 +2418,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59672 int error = -ESRCH;
59673
59674 rcu_read_lock();
59675- p = find_task_by_vpid(pid);
59676+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59677+ /* allow glibc communication via tgkill to other threads in our
59678+ thread group */
59679+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59680+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
59681+ p = find_task_by_vpid_unrestricted(pid);
59682+ else
59683+#endif
59684+ p = find_task_by_vpid(pid);
59685 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59686 error = check_kill_permission(sig, info, p);
59687 /*
59688diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59689--- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59690+++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59691@@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59692 }
59693 EXPORT_SYMBOL(smp_call_function);
59694
59695-void ipi_call_lock(void)
59696+void ipi_call_lock(void) __acquires(call_function.lock)
59697 {
59698 raw_spin_lock(&call_function.lock);
59699 }
59700
59701-void ipi_call_unlock(void)
59702+void ipi_call_unlock(void) __releases(call_function.lock)
59703 {
59704 raw_spin_unlock(&call_function.lock);
59705 }
59706
59707-void ipi_call_lock_irq(void)
59708+void ipi_call_lock_irq(void) __acquires(call_function.lock)
59709 {
59710 raw_spin_lock_irq(&call_function.lock);
59711 }
59712
59713-void ipi_call_unlock_irq(void)
59714+void ipi_call_unlock_irq(void) __releases(call_function.lock)
59715 {
59716 raw_spin_unlock_irq(&call_function.lock);
59717 }
59718diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59719--- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59720+++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59721@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59722
59723 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59724
59725-char *softirq_to_name[NR_SOFTIRQS] = {
59726+const char * const softirq_to_name[NR_SOFTIRQS] = {
59727 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59728 "TASKLET", "SCHED", "HRTIMER", "RCU"
59729 };
59730@@ -235,7 +235,7 @@ restart:
59731 kstat_incr_softirqs_this_cpu(vec_nr);
59732
59733 trace_softirq_entry(vec_nr);
59734- h->action(h);
59735+ h->action();
59736 trace_softirq_exit(vec_nr);
59737 if (unlikely(prev_count != preempt_count())) {
59738 printk(KERN_ERR "huh, entered softirq %u %s %p"
59739@@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59740 local_irq_restore(flags);
59741 }
59742
59743-void open_softirq(int nr, void (*action)(struct softirq_action *))
59744+void open_softirq(int nr, void (*action)(void))
59745 {
59746- softirq_vec[nr].action = action;
59747+ pax_open_kernel();
59748+ *(void **)&softirq_vec[nr].action = action;
59749+ pax_close_kernel();
59750 }
59751
59752 /*
59753@@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59754
59755 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59756
59757-static void tasklet_action(struct softirq_action *a)
59758+static void tasklet_action(void)
59759 {
59760 struct tasklet_struct *list;
59761
59762@@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59763 }
59764 }
59765
59766-static void tasklet_hi_action(struct softirq_action *a)
59767+static void tasklet_hi_action(void)
59768 {
59769 struct tasklet_struct *list;
59770
59771diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59772--- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59773+++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59774@@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59775 error = -EACCES;
59776 goto out;
59777 }
59778+
59779+ if (gr_handle_chroot_setpriority(p, niceval)) {
59780+ error = -EACCES;
59781+ goto out;
59782+ }
59783+
59784 no_nice = security_task_setnice(p, niceval);
59785 if (no_nice) {
59786 error = no_nice;
59787@@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59788 goto error;
59789 }
59790
59791+ if (gr_check_group_change(new->gid, new->egid, -1))
59792+ goto error;
59793+
59794 if (rgid != (gid_t) -1 ||
59795 (egid != (gid_t) -1 && egid != old->gid))
59796 new->sgid = new->egid;
59797@@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59798 old = current_cred();
59799
59800 retval = -EPERM;
59801+
59802+ if (gr_check_group_change(gid, gid, gid))
59803+ goto error;
59804+
59805 if (nsown_capable(CAP_SETGID))
59806 new->gid = new->egid = new->sgid = new->fsgid = gid;
59807 else if (gid == old->gid || gid == old->sgid)
59808@@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59809 goto error;
59810 }
59811
59812+ if (gr_check_user_change(new->uid, new->euid, -1))
59813+ goto error;
59814+
59815 if (new->uid != old->uid) {
59816 retval = set_user(new);
59817 if (retval < 0)
59818@@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59819 old = current_cred();
59820
59821 retval = -EPERM;
59822+
59823+ if (gr_check_crash_uid(uid))
59824+ goto error;
59825+ if (gr_check_user_change(uid, uid, uid))
59826+ goto error;
59827+
59828 if (nsown_capable(CAP_SETUID)) {
59829 new->suid = new->uid = uid;
59830 if (uid != old->uid) {
59831@@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59832 goto error;
59833 }
59834
59835+ if (gr_check_user_change(ruid, euid, -1))
59836+ goto error;
59837+
59838 if (ruid != (uid_t) -1) {
59839 new->uid = ruid;
59840 if (ruid != old->uid) {
59841@@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59842 goto error;
59843 }
59844
59845+ if (gr_check_group_change(rgid, egid, -1))
59846+ goto error;
59847+
59848 if (rgid != (gid_t) -1)
59849 new->gid = rgid;
59850 if (egid != (gid_t) -1)
59851@@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59852 old = current_cred();
59853 old_fsuid = old->fsuid;
59854
59855+ if (gr_check_user_change(-1, -1, uid))
59856+ goto error;
59857+
59858 if (uid == old->uid || uid == old->euid ||
59859 uid == old->suid || uid == old->fsuid ||
59860 nsown_capable(CAP_SETUID)) {
59861@@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59862 }
59863 }
59864
59865+error:
59866 abort_creds(new);
59867 return old_fsuid;
59868
59869@@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59870 if (gid == old->gid || gid == old->egid ||
59871 gid == old->sgid || gid == old->fsgid ||
59872 nsown_capable(CAP_SETGID)) {
59873+ if (gr_check_group_change(-1, -1, gid))
59874+ goto error;
59875+
59876 if (gid != old_fsgid) {
59877 new->fsgid = gid;
59878 goto change_okay;
59879 }
59880 }
59881
59882+error:
59883 abort_creds(new);
59884 return old_fsgid;
59885
59886@@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59887 error = get_dumpable(me->mm);
59888 break;
59889 case PR_SET_DUMPABLE:
59890- if (arg2 < 0 || arg2 > 1) {
59891+ if (arg2 > 1) {
59892 error = -EINVAL;
59893 break;
59894 }
59895diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59896--- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59897+++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59898@@ -84,6 +84,13 @@
59899
59900
59901 #if defined(CONFIG_SYSCTL)
59902+#include <linux/grsecurity.h>
59903+#include <linux/grinternal.h>
59904+
59905+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59906+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59907+ const int op);
59908+extern int gr_handle_chroot_sysctl(const int op);
59909
59910 /* External variables not in a header file. */
59911 extern int sysctl_overcommit_memory;
59912@@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59913 }
59914
59915 #endif
59916+extern struct ctl_table grsecurity_table[];
59917
59918 static struct ctl_table root_table[];
59919 static struct ctl_table_root sysctl_table_root;
59920@@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59921 int sysctl_legacy_va_layout;
59922 #endif
59923
59924+#ifdef CONFIG_PAX_SOFTMODE
59925+static ctl_table pax_table[] = {
59926+ {
59927+ .procname = "softmode",
59928+ .data = &pax_softmode,
59929+ .maxlen = sizeof(unsigned int),
59930+ .mode = 0600,
59931+ .proc_handler = &proc_dointvec,
59932+ },
59933+
59934+ { }
59935+};
59936+#endif
59937+
59938 /* The default sysctl tables: */
59939
59940 static struct ctl_table root_table[] = {
59941@@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59942 #endif
59943
59944 static struct ctl_table kern_table[] = {
59945+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59946+ {
59947+ .procname = "grsecurity",
59948+ .mode = 0500,
59949+ .child = grsecurity_table,
59950+ },
59951+#endif
59952+
59953+#ifdef CONFIG_PAX_SOFTMODE
59954+ {
59955+ .procname = "pax",
59956+ .mode = 0500,
59957+ .child = pax_table,
59958+ },
59959+#endif
59960+
59961 {
59962 .procname = "sched_child_runs_first",
59963 .data = &sysctl_sched_child_runs_first,
59964@@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59965 .data = &modprobe_path,
59966 .maxlen = KMOD_PATH_LEN,
59967 .mode = 0644,
59968- .proc_handler = proc_dostring,
59969+ .proc_handler = proc_dostring_modpriv,
59970 },
59971 {
59972 .procname = "modules_disabled",
59973@@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
59974 .extra1 = &zero,
59975 .extra2 = &one,
59976 },
59977+#endif
59978 {
59979 .procname = "kptr_restrict",
59980 .data = &kptr_restrict,
59981 .maxlen = sizeof(int),
59982 .mode = 0644,
59983 .proc_handler = proc_dmesg_restrict,
59984+#ifdef CONFIG_GRKERNSEC_HIDESYM
59985+ .extra1 = &two,
59986+#else
59987 .extra1 = &zero,
59988+#endif
59989 .extra2 = &two,
59990 },
59991-#endif
59992 {
59993 .procname = "ngroups_max",
59994 .data = &ngroups_max,
59995@@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
59996 .proc_handler = proc_dointvec_minmax,
59997 .extra1 = &zero,
59998 },
59999+ {
60000+ .procname = "heap_stack_gap",
60001+ .data = &sysctl_heap_stack_gap,
60002+ .maxlen = sizeof(sysctl_heap_stack_gap),
60003+ .mode = 0644,
60004+ .proc_handler = proc_doulongvec_minmax,
60005+ },
60006 #else
60007 {
60008 .procname = "nr_trim_pages",
60009@@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
60010 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
60011 {
60012 int mode;
60013+ int error;
60014+
60015+ if (table->parent != NULL && table->parent->procname != NULL &&
60016+ table->procname != NULL &&
60017+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
60018+ return -EACCES;
60019+ if (gr_handle_chroot_sysctl(op))
60020+ return -EACCES;
60021+ error = gr_handle_sysctl(table, op);
60022+ if (error)
60023+ return error;
60024
60025 if (root->permissions)
60026 mode = root->permissions(root, current->nsproxy, table);
60027@@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
60028 buffer, lenp, ppos);
60029 }
60030
60031+int proc_dostring_modpriv(struct ctl_table *table, int write,
60032+ void __user *buffer, size_t *lenp, loff_t *ppos)
60033+{
60034+ if (write && !capable(CAP_SYS_MODULE))
60035+ return -EPERM;
60036+
60037+ return _proc_do_string(table->data, table->maxlen, write,
60038+ buffer, lenp, ppos);
60039+}
60040+
60041 static size_t proc_skip_spaces(char **buf)
60042 {
60043 size_t ret;
60044@@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
60045 len = strlen(tmp);
60046 if (len > *size)
60047 len = *size;
60048+ if (len > sizeof(tmp))
60049+ len = sizeof(tmp);
60050 if (copy_to_user(*buf, tmp, len))
60051 return -EFAULT;
60052 *size -= len;
60053@@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
60054 *i = val;
60055 } else {
60056 val = convdiv * (*i) / convmul;
60057- if (!first)
60058+ if (!first) {
60059 err = proc_put_char(&buffer, &left, '\t');
60060+ if (err)
60061+ break;
60062+ }
60063 err = proc_put_long(&buffer, &left, val, false);
60064 if (err)
60065 break;
60066@@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
60067 return -ENOSYS;
60068 }
60069
60070+int proc_dostring_modpriv(struct ctl_table *table, int write,
60071+ void __user *buffer, size_t *lenp, loff_t *ppos)
60072+{
60073+ return -ENOSYS;
60074+}
60075+
60076 int proc_dointvec(struct ctl_table *table, int write,
60077 void __user *buffer, size_t *lenp, loff_t *ppos)
60078 {
60079@@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
60080 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
60081 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
60082 EXPORT_SYMBOL(proc_dostring);
60083+EXPORT_SYMBOL(proc_dostring_modpriv);
60084 EXPORT_SYMBOL(proc_doulongvec_minmax);
60085 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
60086 EXPORT_SYMBOL(register_sysctl_table);
60087diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
60088--- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
60089+++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
60090@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
60091 set_fail(&fail, table, "Directory with extra2");
60092 } else {
60093 if ((table->proc_handler == proc_dostring) ||
60094+ (table->proc_handler == proc_dostring_modpriv) ||
60095 (table->proc_handler == proc_dointvec) ||
60096 (table->proc_handler == proc_dointvec_minmax) ||
60097 (table->proc_handler == proc_dointvec_jiffies) ||
60098diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
60099--- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
60100+++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
60101@@ -27,9 +27,12 @@
60102 #include <linux/cgroup.h>
60103 #include <linux/fs.h>
60104 #include <linux/file.h>
60105+#include <linux/grsecurity.h>
60106 #include <net/genetlink.h>
60107 #include <asm/atomic.h>
60108
60109+extern int gr_is_taskstats_denied(int pid);
60110+
60111 /*
60112 * Maximum length of a cpumask that can be specified in
60113 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
60114@@ -558,6 +561,9 @@ err:
60115
60116 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
60117 {
60118+ if (gr_is_taskstats_denied(current->pid))
60119+ return -EACCES;
60120+
60121 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
60122 return cmd_attr_register_cpumask(info);
60123 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
60124diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
60125--- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
60126+++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
60127@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
60128 * then clear the broadcast bit.
60129 */
60130 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
60131- int cpu = smp_processor_id();
60132+ cpu = smp_processor_id();
60133
60134 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
60135 tick_broadcast_clear_oneshot(cpu);
60136diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
60137--- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
60138+++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
60139@@ -14,6 +14,7 @@
60140 #include <linux/init.h>
60141 #include <linux/mm.h>
60142 #include <linux/sched.h>
60143+#include <linux/grsecurity.h>
60144 #include <linux/syscore_ops.h>
60145 #include <linux/clocksource.h>
60146 #include <linux/jiffies.h>
60147@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
60148 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60149 return -EINVAL;
60150
60151+ gr_log_timechange();
60152+
60153 write_seqlock_irqsave(&xtime_lock, flags);
60154
60155 timekeeping_forward_now();
60156diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60157--- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60158+++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60159@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60160
60161 static void print_name_offset(struct seq_file *m, void *sym)
60162 {
60163+#ifdef CONFIG_GRKERNSEC_HIDESYM
60164+ SEQ_printf(m, "<%p>", NULL);
60165+#else
60166 char symname[KSYM_NAME_LEN];
60167
60168 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60169 SEQ_printf(m, "<%pK>", sym);
60170 else
60171 SEQ_printf(m, "%s", symname);
60172+#endif
60173 }
60174
60175 static void
60176@@ -112,7 +116,11 @@ next_one:
60177 static void
60178 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60179 {
60180+#ifdef CONFIG_GRKERNSEC_HIDESYM
60181+ SEQ_printf(m, " .base: %p\n", NULL);
60182+#else
60183 SEQ_printf(m, " .base: %pK\n", base);
60184+#endif
60185 SEQ_printf(m, " .index: %d\n",
60186 base->index);
60187 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60188@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60189 {
60190 struct proc_dir_entry *pe;
60191
60192+#ifdef CONFIG_GRKERNSEC_PROC_ADD
60193+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60194+#else
60195 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60196+#endif
60197 if (!pe)
60198 return -ENOMEM;
60199 return 0;
60200diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60201--- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60202+++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60203@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60204 static unsigned long nr_entries;
60205 static struct entry entries[MAX_ENTRIES];
60206
60207-static atomic_t overflow_count;
60208+static atomic_unchecked_t overflow_count;
60209
60210 /*
60211 * The entries are in a hash-table, for fast lookup:
60212@@ -140,7 +140,7 @@ static void reset_entries(void)
60213 nr_entries = 0;
60214 memset(entries, 0, sizeof(entries));
60215 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60216- atomic_set(&overflow_count, 0);
60217+ atomic_set_unchecked(&overflow_count, 0);
60218 }
60219
60220 static struct entry *alloc_entry(void)
60221@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60222 if (likely(entry))
60223 entry->count++;
60224 else
60225- atomic_inc(&overflow_count);
60226+ atomic_inc_unchecked(&overflow_count);
60227
60228 out_unlock:
60229 raw_spin_unlock_irqrestore(lock, flags);
60230@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60231
60232 static void print_name_offset(struct seq_file *m, unsigned long addr)
60233 {
60234+#ifdef CONFIG_GRKERNSEC_HIDESYM
60235+ seq_printf(m, "<%p>", NULL);
60236+#else
60237 char symname[KSYM_NAME_LEN];
60238
60239 if (lookup_symbol_name(addr, symname) < 0)
60240 seq_printf(m, "<%p>", (void *)addr);
60241 else
60242 seq_printf(m, "%s", symname);
60243+#endif
60244 }
60245
60246 static int tstats_show(struct seq_file *m, void *v)
60247@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60248
60249 seq_puts(m, "Timer Stats Version: v0.2\n");
60250 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60251- if (atomic_read(&overflow_count))
60252+ if (atomic_read_unchecked(&overflow_count))
60253 seq_printf(m, "Overflow: %d entries\n",
60254- atomic_read(&overflow_count));
60255+ atomic_read_unchecked(&overflow_count));
60256
60257 for (i = 0; i < nr_entries; i++) {
60258 entry = entries + i;
60259@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60260 {
60261 struct proc_dir_entry *pe;
60262
60263+#ifdef CONFIG_GRKERNSEC_PROC_ADD
60264+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60265+#else
60266 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60267+#endif
60268 if (!pe)
60269 return -ENOMEM;
60270 return 0;
60271diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60272--- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60273+++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60274@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60275 return error;
60276
60277 if (tz) {
60278+ /* we log in do_settimeofday called below, so don't log twice
60279+ */
60280+ if (!tv)
60281+ gr_log_timechange();
60282+
60283 /* SMP safe, global irq locking makes it work. */
60284 sys_tz = *tz;
60285 update_vsyscall_tz();
60286diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60287--- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60288+++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60289@@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60290 /*
60291 * This function runs timers and the timer-tq in bottom half context.
60292 */
60293-static void run_timer_softirq(struct softirq_action *h)
60294+static void run_timer_softirq(void)
60295 {
60296 struct tvec_base *base = __this_cpu_read(tvec_bases);
60297
60298diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60299--- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60300+++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60301@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60302 struct blk_trace *bt = filp->private_data;
60303 char buf[16];
60304
60305- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60306+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60307
60308 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60309 }
60310@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60311 return 1;
60312
60313 bt = buf->chan->private_data;
60314- atomic_inc(&bt->dropped);
60315+ atomic_inc_unchecked(&bt->dropped);
60316 return 0;
60317 }
60318
60319@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60320
60321 bt->dir = dir;
60322 bt->dev = dev;
60323- atomic_set(&bt->dropped, 0);
60324+ atomic_set_unchecked(&bt->dropped, 0);
60325
60326 ret = -EIO;
60327 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60328diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60329--- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60330+++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60331@@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60332
60333 ip = rec->ip;
60334
60335+ ret = ftrace_arch_code_modify_prepare();
60336+ FTRACE_WARN_ON(ret);
60337+ if (ret)
60338+ return 0;
60339+
60340 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60341+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60342 if (ret) {
60343 ftrace_bug(ret, ip);
60344 rec->flags |= FTRACE_FL_FAILED;
60345- return 0;
60346 }
60347- return 1;
60348+ return ret ? 0 : 1;
60349 }
60350
60351 /*
60352@@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60353
60354 int
60355 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60356- void *data)
60357+ void *data)
60358 {
60359 struct ftrace_func_probe *entry;
60360 struct ftrace_page *pg;
60361diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60362--- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60363+++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60364@@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60365 size_t rem;
60366 unsigned int i;
60367
60368+ pax_track_stack();
60369+
60370 if (splice_grow_spd(pipe, &spd))
60371 return -ENOMEM;
60372
60373@@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60374 int entries, size, i;
60375 size_t ret;
60376
60377+ pax_track_stack();
60378+
60379 if (splice_grow_spd(pipe, &spd))
60380 return -ENOMEM;
60381
60382@@ -3981,10 +3985,9 @@ static const struct file_operations trac
60383 };
60384 #endif
60385
60386-static struct dentry *d_tracer;
60387-
60388 struct dentry *tracing_init_dentry(void)
60389 {
60390+ static struct dentry *d_tracer;
60391 static int once;
60392
60393 if (d_tracer)
60394@@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60395 return d_tracer;
60396 }
60397
60398-static struct dentry *d_percpu;
60399-
60400 struct dentry *tracing_dentry_percpu(void)
60401 {
60402+ static struct dentry *d_percpu;
60403 static int once;
60404 struct dentry *d_tracer;
60405
60406diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60407--- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60408+++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60409@@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60410 struct ftrace_module_file_ops {
60411 struct list_head list;
60412 struct module *mod;
60413- struct file_operations id;
60414- struct file_operations enable;
60415- struct file_operations format;
60416- struct file_operations filter;
60417 };
60418
60419 static struct ftrace_module_file_ops *
60420@@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60421
60422 file_ops->mod = mod;
60423
60424- file_ops->id = ftrace_event_id_fops;
60425- file_ops->id.owner = mod;
60426-
60427- file_ops->enable = ftrace_enable_fops;
60428- file_ops->enable.owner = mod;
60429-
60430- file_ops->filter = ftrace_event_filter_fops;
60431- file_ops->filter.owner = mod;
60432-
60433- file_ops->format = ftrace_event_format_fops;
60434- file_ops->format.owner = mod;
60435+ pax_open_kernel();
60436+ *(void **)&mod->trace_id.owner = mod;
60437+ *(void **)&mod->trace_enable.owner = mod;
60438+ *(void **)&mod->trace_filter.owner = mod;
60439+ *(void **)&mod->trace_format.owner = mod;
60440+ pax_close_kernel();
60441
60442 list_add(&file_ops->list, &ftrace_module_file_list);
60443
60444@@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60445
60446 for_each_event(call, start, end) {
60447 __trace_add_event_call(*call, mod,
60448- &file_ops->id, &file_ops->enable,
60449- &file_ops->filter, &file_ops->format);
60450+ &mod->trace_id, &mod->trace_enable,
60451+ &mod->trace_filter, &mod->trace_format);
60452 }
60453 }
60454
60455diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60456--- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60457+++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60458@@ -24,7 +24,7 @@ struct header_iter {
60459 static struct trace_array *mmio_trace_array;
60460 static bool overrun_detected;
60461 static unsigned long prev_overruns;
60462-static atomic_t dropped_count;
60463+static atomic_unchecked_t dropped_count;
60464
60465 static void mmio_reset_data(struct trace_array *tr)
60466 {
60467@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60468
60469 static unsigned long count_overruns(struct trace_iterator *iter)
60470 {
60471- unsigned long cnt = atomic_xchg(&dropped_count, 0);
60472+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60473 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60474
60475 if (over > prev_overruns)
60476@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60477 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60478 sizeof(*entry), 0, pc);
60479 if (!event) {
60480- atomic_inc(&dropped_count);
60481+ atomic_inc_unchecked(&dropped_count);
60482 return;
60483 }
60484 entry = ring_buffer_event_data(event);
60485@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60486 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60487 sizeof(*entry), 0, pc);
60488 if (!event) {
60489- atomic_inc(&dropped_count);
60490+ atomic_inc_unchecked(&dropped_count);
60491 return;
60492 }
60493 entry = ring_buffer_event_data(event);
60494diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60495--- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60496+++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60497@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60498
60499 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60500 if (!IS_ERR(p)) {
60501- p = mangle_path(s->buffer + s->len, p, "\n");
60502+ p = mangle_path(s->buffer + s->len, p, "\n\\");
60503 if (p) {
60504 s->len = p - s->buffer;
60505 return 1;
60506diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60507--- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60508+++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60509@@ -50,7 +50,7 @@ static inline void check_stack(void)
60510 return;
60511
60512 /* we do not handle interrupt stacks yet */
60513- if (!object_is_on_stack(&this_size))
60514+ if (!object_starts_on_stack(&this_size))
60515 return;
60516
60517 local_irq_save(flags);
60518diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60519--- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60520+++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60521@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60522 int cpu;
60523 pid_t pid;
60524 /* Can be inserted from interrupt or user context, need to be atomic */
60525- atomic_t inserted;
60526+ atomic_unchecked_t inserted;
60527 /*
60528 * Don't need to be atomic, works are serialized in a single workqueue thread
60529 * on a single CPU.
60530@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60531 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60532 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60533 if (node->pid == wq_thread->pid) {
60534- atomic_inc(&node->inserted);
60535+ atomic_inc_unchecked(&node->inserted);
60536 goto found;
60537 }
60538 }
60539@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60540 tsk = get_pid_task(pid, PIDTYPE_PID);
60541 if (tsk) {
60542 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60543- atomic_read(&cws->inserted), cws->executed,
60544+ atomic_read_unchecked(&cws->inserted), cws->executed,
60545 tsk->comm);
60546 put_task_struct(tsk);
60547 }
60548diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60549--- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60550+++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60551@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60552 return BUG_TRAP_TYPE_NONE;
60553
60554 bug = find_bug(bugaddr);
60555+ if (!bug)
60556+ return BUG_TRAP_TYPE_NONE;
60557
60558 file = NULL;
60559 line = 0;
60560diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60561--- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60562+++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60563@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60564 if (limit > 4)
60565 return;
60566
60567- is_on_stack = object_is_on_stack(addr);
60568+ is_on_stack = object_starts_on_stack(addr);
60569 if (is_on_stack == onstack)
60570 return;
60571
60572diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60573--- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60574+++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60575@@ -862,7 +862,7 @@ out:
60576
60577 static void check_for_stack(struct device *dev, void *addr)
60578 {
60579- if (object_is_on_stack(addr))
60580+ if (object_starts_on_stack(addr))
60581 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60582 "stack [addr=%p]\n", addr);
60583 }
60584diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60585--- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60586+++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60587@@ -269,7 +269,7 @@ static void free(void *where)
60588 malloc_ptr = free_mem_ptr;
60589 }
60590 #else
60591-#define malloc(a) kmalloc(a, GFP_KERNEL)
60592+#define malloc(a) kmalloc((a), GFP_KERNEL)
60593 #define free(a) kfree(a)
60594 #endif
60595
60596diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60597--- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60598+++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60599@@ -1078,6 +1078,7 @@ config LATENCYTOP
60600 depends on DEBUG_KERNEL
60601 depends on STACKTRACE_SUPPORT
60602 depends on PROC_FS
60603+ depends on !GRKERNSEC_HIDESYM
60604 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60605 select KALLSYMS
60606 select KALLSYMS_ALL
60607diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60608--- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60609+++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60610@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60611 */
60612 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60613 {
60614- WARN_ON(release == NULL);
60615+ BUG_ON(release == NULL);
60616 WARN_ON(release == (void (*)(struct kref *))kfree);
60617
60618 if (atomic_dec_and_test(&kref->refcount)) {
60619diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60620--- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60621+++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60622@@ -80,7 +80,7 @@ struct radix_tree_preload {
60623 int nr;
60624 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60625 };
60626-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60627+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60628
60629 static inline void *ptr_to_indirect(void *ptr)
60630 {
60631diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60632--- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60633+++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60634@@ -16,6 +16,9 @@
60635 * - scnprintf and vscnprintf
60636 */
60637
60638+#ifdef CONFIG_GRKERNSEC_HIDESYM
60639+#define __INCLUDED_BY_HIDESYM 1
60640+#endif
60641 #include <stdarg.h>
60642 #include <linux/module.h>
60643 #include <linux/types.h>
60644@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60645 char sym[KSYM_SYMBOL_LEN];
60646 if (ext == 'B')
60647 sprint_backtrace(sym, value);
60648- else if (ext != 'f' && ext != 's')
60649+ else if (ext != 'f' && ext != 's' && ext != 'a')
60650 sprint_symbol(sym, value);
60651 else
60652 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60653@@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60654 return string(buf, end, uuid, spec);
60655 }
60656
60657+#ifdef CONFIG_GRKERNSEC_HIDESYM
60658+int kptr_restrict __read_mostly = 2;
60659+#else
60660 int kptr_restrict __read_mostly;
60661+#endif
60662
60663 /*
60664 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60665@@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60666 * - 'S' For symbolic direct pointers with offset
60667 * - 's' For symbolic direct pointers without offset
60668 * - 'B' For backtraced symbolic direct pointers with offset
60669+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60670+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60671 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60672 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60673 * - 'M' For a 6-byte MAC address, it prints the address in the
60674@@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60675 {
60676 if (!ptr && *fmt != 'K') {
60677 /*
60678- * Print (null) with the same width as a pointer so it makes
60679+ * Print (nil) with the same width as a pointer so it makes
60680 * tabular output look nice.
60681 */
60682 if (spec.field_width == -1)
60683 spec.field_width = 2 * sizeof(void *);
60684- return string(buf, end, "(null)", spec);
60685+ return string(buf, end, "(nil)", spec);
60686 }
60687
60688 switch (*fmt) {
60689@@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60690 /* Fallthrough */
60691 case 'S':
60692 case 's':
60693+#ifdef CONFIG_GRKERNSEC_HIDESYM
60694+ break;
60695+#else
60696+ return symbol_string(buf, end, ptr, spec, *fmt);
60697+#endif
60698+ case 'A':
60699+ case 'a':
60700 case 'B':
60701 return symbol_string(buf, end, ptr, spec, *fmt);
60702 case 'R':
60703@@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60704 typeof(type) value; \
60705 if (sizeof(type) == 8) { \
60706 args = PTR_ALIGN(args, sizeof(u32)); \
60707- *(u32 *)&value = *(u32 *)args; \
60708- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60709+ *(u32 *)&value = *(const u32 *)args; \
60710+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60711 } else { \
60712 args = PTR_ALIGN(args, sizeof(type)); \
60713- value = *(typeof(type) *)args; \
60714+ value = *(const typeof(type) *)args; \
60715 } \
60716 args += sizeof(type); \
60717 value; \
60718@@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60719 case FORMAT_TYPE_STR: {
60720 const char *str_arg = args;
60721 args += strlen(str_arg) + 1;
60722- str = string(str, end, (char *)str_arg, spec);
60723+ str = string(str, end, str_arg, spec);
60724 break;
60725 }
60726
60727diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60728--- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60729+++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60730@@ -0,0 +1 @@
60731+-grsec
60732diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60733--- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60734+++ linux-2.6.39.4/Makefile 2011-08-07 14:17:20.000000000 -0400
60735@@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60736
60737 HOSTCC = gcc
60738 HOSTCXX = g++
60739-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60740-HOSTCXXFLAGS = -O2
60741+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60742+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60743+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60744
60745 # Decide whether to build built-in, modular, or both.
60746 # Normally, just do built-in.
60747@@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60748 KBUILD_CPPFLAGS := -D__KERNEL__
60749
60750 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60751+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
60752 -fno-strict-aliasing -fno-common \
60753 -Werror-implicit-function-declaration \
60754 -Wno-format-security \
60755 -fno-delete-null-pointer-checks
60756+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60757 KBUILD_AFLAGS_KERNEL :=
60758 KBUILD_CFLAGS_KERNEL :=
60759 KBUILD_AFLAGS := -D__ASSEMBLY__
60760@@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60761 # Rules shared between *config targets and build targets
60762
60763 # Basic helpers built in scripts/
60764-PHONY += scripts_basic
60765-scripts_basic:
60766+PHONY += scripts_basic gcc-plugins
60767+scripts_basic: gcc-plugins
60768 $(Q)$(MAKE) $(build)=scripts/basic
60769 $(Q)rm -f .tmp_quiet_recordmcount
60770
60771@@ -548,6 +551,25 @@ else
60772 KBUILD_CFLAGS += -O2
60773 endif
60774
60775+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60776+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60777+ifdef CONFIG_PAX_MEMORY_STACKLEAK
60778+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60779+endif
60780+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60781+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60782+gcc-plugins:
60783+ $(Q)$(MAKE) $(build)=tools/gcc
60784+else
60785+gcc-plugins:
60786+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60787+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60788+else
60789+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60790+endif
60791+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60792+endif
60793+
60794 include $(srctree)/arch/$(SRCARCH)/Makefile
60795
60796 ifneq ($(CONFIG_FRAME_WARN),0)
60797@@ -685,7 +707,7 @@ export mod_strip_cmd
60798
60799
60800 ifeq ($(KBUILD_EXTMOD),)
60801-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60802+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60803
60804 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60805 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60806@@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60807 endif
60808
60809 # prepare2 creates a makefile if using a separate output directory
60810-prepare2: prepare3 outputmakefile
60811+prepare2: prepare3 outputmakefile gcc-plugins
60812
60813 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60814 include/config/auto.conf
60815@@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60816 $(call cmd,rmdirs)
60817 $(call cmd,rmfiles)
60818 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60819- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60820+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60821 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60822 -o -name '*.symtypes' -o -name 'modules.order' \
60823 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60824diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60825--- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60826+++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60827@@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60828 struct address_space *mapping = file->f_mapping;
60829
60830 if (!mapping->a_ops->readpage)
60831- return -ENOEXEC;
60832+ return -ENODEV;
60833 file_accessed(file);
60834 vma->vm_ops = &generic_file_vm_ops;
60835 vma->vm_flags |= VM_CAN_NONLINEAR;
60836@@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60837 *pos = i_size_read(inode);
60838
60839 if (limit != RLIM_INFINITY) {
60840+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60841 if (*pos >= limit) {
60842 send_sig(SIGXFSZ, current, 0);
60843 return -EFBIG;
60844diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60845--- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60846+++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60847@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60848 retry:
60849 vma = find_vma(mm, start);
60850
60851+#ifdef CONFIG_PAX_SEGMEXEC
60852+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60853+ goto out;
60854+#endif
60855+
60856 /*
60857 * Make sure the vma is shared, that it supports prefaulting,
60858 * and that the remapped range is valid and fully within
60859@@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60860 /*
60861 * drop PG_Mlocked flag for over-mapped range
60862 */
60863- unsigned int saved_flags = vma->vm_flags;
60864+ unsigned long saved_flags = vma->vm_flags;
60865 munlock_vma_pages_range(vma, start, start + size);
60866 vma->vm_flags = saved_flags;
60867 }
60868diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60869--- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60870+++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60871@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60872 * So no dangers, even with speculative execution.
60873 */
60874 page = pte_page(pkmap_page_table[i]);
60875+ pax_open_kernel();
60876 pte_clear(&init_mm, (unsigned long)page_address(page),
60877 &pkmap_page_table[i]);
60878-
60879+ pax_close_kernel();
60880 set_page_address(page, NULL);
60881 need_flush = 1;
60882 }
60883@@ -186,9 +187,11 @@ start:
60884 }
60885 }
60886 vaddr = PKMAP_ADDR(last_pkmap_nr);
60887+
60888+ pax_open_kernel();
60889 set_pte_at(&init_mm, vaddr,
60890 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60891-
60892+ pax_close_kernel();
60893 pkmap_count[last_pkmap_nr] = 1;
60894 set_page_address(page, (void *)vaddr);
60895
60896diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60897--- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60898+++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60899@@ -702,7 +702,7 @@ out:
60900 * run pte_offset_map on the pmd, if an huge pmd could
60901 * materialize from under us from a different thread.
60902 */
60903- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60904+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60905 return VM_FAULT_OOM;
60906 /* if an huge pmd materialized from under us just retry later */
60907 if (unlikely(pmd_trans_huge(*pmd)))
60908diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60909--- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60910+++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60911@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60912 return 1;
60913 }
60914
60915+#ifdef CONFIG_PAX_SEGMEXEC
60916+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60917+{
60918+ struct mm_struct *mm = vma->vm_mm;
60919+ struct vm_area_struct *vma_m;
60920+ unsigned long address_m;
60921+ pte_t *ptep_m;
60922+
60923+ vma_m = pax_find_mirror_vma(vma);
60924+ if (!vma_m)
60925+ return;
60926+
60927+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60928+ address_m = address + SEGMEXEC_TASK_SIZE;
60929+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60930+ get_page(page_m);
60931+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
60932+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60933+}
60934+#endif
60935+
60936 /*
60937 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60938 */
60939@@ -2440,6 +2461,11 @@ retry_avoidcopy:
60940 make_huge_pte(vma, new_page, 1));
60941 page_remove_rmap(old_page);
60942 hugepage_add_new_anon_rmap(new_page, vma, address);
60943+
60944+#ifdef CONFIG_PAX_SEGMEXEC
60945+ pax_mirror_huge_pte(vma, address, new_page);
60946+#endif
60947+
60948 /* Make the old page be freed below */
60949 new_page = old_page;
60950 mmu_notifier_invalidate_range_end(mm,
60951@@ -2591,6 +2617,10 @@ retry:
60952 && (vma->vm_flags & VM_SHARED)));
60953 set_huge_pte_at(mm, address, ptep, new_pte);
60954
60955+#ifdef CONFIG_PAX_SEGMEXEC
60956+ pax_mirror_huge_pte(vma, address, page);
60957+#endif
60958+
60959 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60960 /* Optimization, do the COW without a second fault */
60961 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60962@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60963 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60964 struct hstate *h = hstate_vma(vma);
60965
60966+#ifdef CONFIG_PAX_SEGMEXEC
60967+ struct vm_area_struct *vma_m;
60968+#endif
60969+
60970 ptep = huge_pte_offset(mm, address);
60971 if (ptep) {
60972 entry = huge_ptep_get(ptep);
60973@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60974 VM_FAULT_SET_HINDEX(h - hstates);
60975 }
60976
60977+#ifdef CONFIG_PAX_SEGMEXEC
60978+ vma_m = pax_find_mirror_vma(vma);
60979+ if (vma_m) {
60980+ unsigned long address_m;
60981+
60982+ if (vma->vm_start > vma_m->vm_start) {
60983+ address_m = address;
60984+ address -= SEGMEXEC_TASK_SIZE;
60985+ vma = vma_m;
60986+ h = hstate_vma(vma);
60987+ } else
60988+ address_m = address + SEGMEXEC_TASK_SIZE;
60989+
60990+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60991+ return VM_FAULT_OOM;
60992+ address_m &= HPAGE_MASK;
60993+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60994+ }
60995+#endif
60996+
60997 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60998 if (!ptep)
60999 return VM_FAULT_OOM;
61000diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
61001--- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
61002+++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
61003@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
61004 * in mm/page_alloc.c
61005 */
61006 extern void __free_pages_bootmem(struct page *page, unsigned int order);
61007+extern void free_compound_page(struct page *page);
61008 extern void prep_compound_page(struct page *page, unsigned long order);
61009 #ifdef CONFIG_MEMORY_FAILURE
61010 extern bool is_free_buddy_page(struct page *page);
61011diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
61012--- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
61013+++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
61014@@ -240,7 +240,7 @@ config KSM
61015 config DEFAULT_MMAP_MIN_ADDR
61016 int "Low address space to protect from user allocation"
61017 depends on MMU
61018- default 4096
61019+ default 65536
61020 help
61021 This is the portion of low virtual memory which should be protected
61022 from userspace allocation. Keeping a user from writing to low pages
61023diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
61024--- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
61025+++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
61026@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
61027
61028 for (i = 0; i < object->trace_len; i++) {
61029 void *ptr = (void *)object->trace[i];
61030- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
61031+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
61032 }
61033 }
61034
61035diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
61036--- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
61037+++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
61038@@ -15,10 +15,10 @@
61039 * happens, handle that and return -EFAULT.
61040 */
61041
61042-long __weak probe_kernel_read(void *dst, void *src, size_t size)
61043+long __weak probe_kernel_read(void *dst, const void *src, size_t size)
61044 __attribute__((alias("__probe_kernel_read")));
61045
61046-long __probe_kernel_read(void *dst, void *src, size_t size)
61047+long __probe_kernel_read(void *dst, const void *src, size_t size)
61048 {
61049 long ret;
61050 mm_segment_t old_fs = get_fs();
61051@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
61052 * Safely write to address @dst from the buffer at @src. If a kernel fault
61053 * happens, handle that and return -EFAULT.
61054 */
61055-long __weak probe_kernel_write(void *dst, void *src, size_t size)
61056+long __weak probe_kernel_write(void *dst, const void *src, size_t size)
61057 __attribute__((alias("__probe_kernel_write")));
61058
61059-long __probe_kernel_write(void *dst, void *src, size_t size)
61060+long __probe_kernel_write(void *dst, const void *src, size_t size)
61061 {
61062 long ret;
61063 mm_segment_t old_fs = get_fs();
61064diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
61065--- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
61066+++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
61067@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
61068 pgoff_t pgoff;
61069 unsigned long new_flags = vma->vm_flags;
61070
61071+#ifdef CONFIG_PAX_SEGMEXEC
61072+ struct vm_area_struct *vma_m;
61073+#endif
61074+
61075 switch (behavior) {
61076 case MADV_NORMAL:
61077 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
61078@@ -110,6 +114,13 @@ success:
61079 /*
61080 * vm_flags is protected by the mmap_sem held in write mode.
61081 */
61082+
61083+#ifdef CONFIG_PAX_SEGMEXEC
61084+ vma_m = pax_find_mirror_vma(vma);
61085+ if (vma_m)
61086+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
61087+#endif
61088+
61089 vma->vm_flags = new_flags;
61090
61091 out:
61092@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
61093 struct vm_area_struct ** prev,
61094 unsigned long start, unsigned long end)
61095 {
61096+
61097+#ifdef CONFIG_PAX_SEGMEXEC
61098+ struct vm_area_struct *vma_m;
61099+#endif
61100+
61101 *prev = vma;
61102 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
61103 return -EINVAL;
61104@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
61105 zap_page_range(vma, start, end - start, &details);
61106 } else
61107 zap_page_range(vma, start, end - start, NULL);
61108+
61109+#ifdef CONFIG_PAX_SEGMEXEC
61110+ vma_m = pax_find_mirror_vma(vma);
61111+ if (vma_m) {
61112+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
61113+ struct zap_details details = {
61114+ .nonlinear_vma = vma_m,
61115+ .last_index = ULONG_MAX,
61116+ };
61117+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
61118+ } else
61119+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
61120+ }
61121+#endif
61122+
61123 return 0;
61124 }
61125
61126@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
61127 if (end < start)
61128 goto out;
61129
61130+#ifdef CONFIG_PAX_SEGMEXEC
61131+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
61132+ if (end > SEGMEXEC_TASK_SIZE)
61133+ goto out;
61134+ } else
61135+#endif
61136+
61137+ if (end > TASK_SIZE)
61138+ goto out;
61139+
61140 error = 0;
61141 if (end == start)
61142 goto out;
61143diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
61144--- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
61145+++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
61146@@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
61147 return;
61148
61149 pmd = pmd_offset(pud, start);
61150+
61151+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61152 pud_clear(pud);
61153 pmd_free_tlb(tlb, pmd, start);
61154+#endif
61155+
61156 }
61157
61158 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61159@@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61160 if (end - 1 > ceiling - 1)
61161 return;
61162
61163+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61164 pud = pud_offset(pgd, start);
61165 pgd_clear(pgd);
61166 pud_free_tlb(tlb, pud, start);
61167+#endif
61168+
61169 }
61170
61171 /*
61172@@ -1410,12 +1417,6 @@ no_page_table:
61173 return page;
61174 }
61175
61176-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61177-{
61178- return stack_guard_page_start(vma, addr) ||
61179- stack_guard_page_end(vma, addr+PAGE_SIZE);
61180-}
61181-
61182 /**
61183 * __get_user_pages() - pin user pages in memory
61184 * @tsk: task_struct of target task
61185@@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61186 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61187 i = 0;
61188
61189- do {
61190+ while (nr_pages) {
61191 struct vm_area_struct *vma;
61192
61193- vma = find_extend_vma(mm, start);
61194+ vma = find_vma(mm, start);
61195 if (!vma && in_gate_area(mm, start)) {
61196 unsigned long pg = start & PAGE_MASK;
61197 pgd_t *pgd;
61198@@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61199 goto next_page;
61200 }
61201
61202- if (!vma ||
61203+ if (!vma || start < vma->vm_start ||
61204 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61205 !(vm_flags & vma->vm_flags))
61206 return i ? : -EFAULT;
61207@@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61208 int ret;
61209 unsigned int fault_flags = 0;
61210
61211- /* For mlock, just skip the stack guard page. */
61212- if (foll_flags & FOLL_MLOCK) {
61213- if (stack_guard_page(vma, start))
61214- goto next_page;
61215- }
61216 if (foll_flags & FOLL_WRITE)
61217 fault_flags |= FAULT_FLAG_WRITE;
61218 if (nonblocking)
61219@@ -1644,7 +1640,7 @@ next_page:
61220 start += PAGE_SIZE;
61221 nr_pages--;
61222 } while (nr_pages && start < vma->vm_end);
61223- } while (nr_pages);
61224+ }
61225 return i;
61226 }
61227 EXPORT_SYMBOL(__get_user_pages);
61228@@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61229 page_add_file_rmap(page);
61230 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61231
61232+#ifdef CONFIG_PAX_SEGMEXEC
61233+ pax_mirror_file_pte(vma, addr, page, ptl);
61234+#endif
61235+
61236 retval = 0;
61237 pte_unmap_unlock(pte, ptl);
61238 return retval;
61239@@ -1829,10 +1829,22 @@ out:
61240 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61241 struct page *page)
61242 {
61243+
61244+#ifdef CONFIG_PAX_SEGMEXEC
61245+ struct vm_area_struct *vma_m;
61246+#endif
61247+
61248 if (addr < vma->vm_start || addr >= vma->vm_end)
61249 return -EFAULT;
61250 if (!page_count(page))
61251 return -EINVAL;
61252+
61253+#ifdef CONFIG_PAX_SEGMEXEC
61254+ vma_m = pax_find_mirror_vma(vma);
61255+ if (vma_m)
61256+ vma_m->vm_flags |= VM_INSERTPAGE;
61257+#endif
61258+
61259 vma->vm_flags |= VM_INSERTPAGE;
61260 return insert_page(vma, addr, page, vma->vm_page_prot);
61261 }
61262@@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61263 unsigned long pfn)
61264 {
61265 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61266+ BUG_ON(vma->vm_mirror);
61267
61268 if (addr < vma->vm_start || addr >= vma->vm_end)
61269 return -EFAULT;
61270@@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61271 copy_user_highpage(dst, src, va, vma);
61272 }
61273
61274+#ifdef CONFIG_PAX_SEGMEXEC
61275+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61276+{
61277+ struct mm_struct *mm = vma->vm_mm;
61278+ spinlock_t *ptl;
61279+ pte_t *pte, entry;
61280+
61281+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61282+ entry = *pte;
61283+ if (!pte_present(entry)) {
61284+ if (!pte_none(entry)) {
61285+ BUG_ON(pte_file(entry));
61286+ free_swap_and_cache(pte_to_swp_entry(entry));
61287+ pte_clear_not_present_full(mm, address, pte, 0);
61288+ }
61289+ } else {
61290+ struct page *page;
61291+
61292+ flush_cache_page(vma, address, pte_pfn(entry));
61293+ entry = ptep_clear_flush(vma, address, pte);
61294+ BUG_ON(pte_dirty(entry));
61295+ page = vm_normal_page(vma, address, entry);
61296+ if (page) {
61297+ update_hiwater_rss(mm);
61298+ if (PageAnon(page))
61299+ dec_mm_counter_fast(mm, MM_ANONPAGES);
61300+ else
61301+ dec_mm_counter_fast(mm, MM_FILEPAGES);
61302+ page_remove_rmap(page);
61303+ page_cache_release(page);
61304+ }
61305+ }
61306+ pte_unmap_unlock(pte, ptl);
61307+}
61308+
61309+/* PaX: if vma is mirrored, synchronize the mirror's PTE
61310+ *
61311+ * the ptl of the lower mapped page is held on entry and is not released on exit
61312+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61313+ */
61314+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61315+{
61316+ struct mm_struct *mm = vma->vm_mm;
61317+ unsigned long address_m;
61318+ spinlock_t *ptl_m;
61319+ struct vm_area_struct *vma_m;
61320+ pmd_t *pmd_m;
61321+ pte_t *pte_m, entry_m;
61322+
61323+ BUG_ON(!page_m || !PageAnon(page_m));
61324+
61325+ vma_m = pax_find_mirror_vma(vma);
61326+ if (!vma_m)
61327+ return;
61328+
61329+ BUG_ON(!PageLocked(page_m));
61330+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61331+ address_m = address + SEGMEXEC_TASK_SIZE;
61332+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61333+ pte_m = pte_offset_map(pmd_m, address_m);
61334+ ptl_m = pte_lockptr(mm, pmd_m);
61335+ if (ptl != ptl_m) {
61336+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61337+ if (!pte_none(*pte_m))
61338+ goto out;
61339+ }
61340+
61341+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61342+ page_cache_get(page_m);
61343+ page_add_anon_rmap(page_m, vma_m, address_m);
61344+ inc_mm_counter_fast(mm, MM_ANONPAGES);
61345+ set_pte_at(mm, address_m, pte_m, entry_m);
61346+ update_mmu_cache(vma_m, address_m, entry_m);
61347+out:
61348+ if (ptl != ptl_m)
61349+ spin_unlock(ptl_m);
61350+ pte_unmap(pte_m);
61351+ unlock_page(page_m);
61352+}
61353+
61354+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61355+{
61356+ struct mm_struct *mm = vma->vm_mm;
61357+ unsigned long address_m;
61358+ spinlock_t *ptl_m;
61359+ struct vm_area_struct *vma_m;
61360+ pmd_t *pmd_m;
61361+ pte_t *pte_m, entry_m;
61362+
61363+ BUG_ON(!page_m || PageAnon(page_m));
61364+
61365+ vma_m = pax_find_mirror_vma(vma);
61366+ if (!vma_m)
61367+ return;
61368+
61369+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61370+ address_m = address + SEGMEXEC_TASK_SIZE;
61371+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61372+ pte_m = pte_offset_map(pmd_m, address_m);
61373+ ptl_m = pte_lockptr(mm, pmd_m);
61374+ if (ptl != ptl_m) {
61375+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61376+ if (!pte_none(*pte_m))
61377+ goto out;
61378+ }
61379+
61380+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61381+ page_cache_get(page_m);
61382+ page_add_file_rmap(page_m);
61383+ inc_mm_counter_fast(mm, MM_FILEPAGES);
61384+ set_pte_at(mm, address_m, pte_m, entry_m);
61385+ update_mmu_cache(vma_m, address_m, entry_m);
61386+out:
61387+ if (ptl != ptl_m)
61388+ spin_unlock(ptl_m);
61389+ pte_unmap(pte_m);
61390+}
61391+
61392+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61393+{
61394+ struct mm_struct *mm = vma->vm_mm;
61395+ unsigned long address_m;
61396+ spinlock_t *ptl_m;
61397+ struct vm_area_struct *vma_m;
61398+ pmd_t *pmd_m;
61399+ pte_t *pte_m, entry_m;
61400+
61401+ vma_m = pax_find_mirror_vma(vma);
61402+ if (!vma_m)
61403+ return;
61404+
61405+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61406+ address_m = address + SEGMEXEC_TASK_SIZE;
61407+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61408+ pte_m = pte_offset_map(pmd_m, address_m);
61409+ ptl_m = pte_lockptr(mm, pmd_m);
61410+ if (ptl != ptl_m) {
61411+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61412+ if (!pte_none(*pte_m))
61413+ goto out;
61414+ }
61415+
61416+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61417+ set_pte_at(mm, address_m, pte_m, entry_m);
61418+out:
61419+ if (ptl != ptl_m)
61420+ spin_unlock(ptl_m);
61421+ pte_unmap(pte_m);
61422+}
61423+
61424+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61425+{
61426+ struct page *page_m;
61427+ pte_t entry;
61428+
61429+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61430+ goto out;
61431+
61432+ entry = *pte;
61433+ page_m = vm_normal_page(vma, address, entry);
61434+ if (!page_m)
61435+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61436+ else if (PageAnon(page_m)) {
61437+ if (pax_find_mirror_vma(vma)) {
61438+ pte_unmap_unlock(pte, ptl);
61439+ lock_page(page_m);
61440+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61441+ if (pte_same(entry, *pte))
61442+ pax_mirror_anon_pte(vma, address, page_m, ptl);
61443+ else
61444+ unlock_page(page_m);
61445+ }
61446+ } else
61447+ pax_mirror_file_pte(vma, address, page_m, ptl);
61448+
61449+out:
61450+ pte_unmap_unlock(pte, ptl);
61451+}
61452+#endif
61453+
61454 /*
61455 * This routine handles present pages, when users try to write
61456 * to a shared page. It is done by copying the page to a new address
61457@@ -2444,6 +2637,12 @@ gotten:
61458 */
61459 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61460 if (likely(pte_same(*page_table, orig_pte))) {
61461+
61462+#ifdef CONFIG_PAX_SEGMEXEC
61463+ if (pax_find_mirror_vma(vma))
61464+ BUG_ON(!trylock_page(new_page));
61465+#endif
61466+
61467 if (old_page) {
61468 if (!PageAnon(old_page)) {
61469 dec_mm_counter_fast(mm, MM_FILEPAGES);
61470@@ -2495,6 +2694,10 @@ gotten:
61471 page_remove_rmap(old_page);
61472 }
61473
61474+#ifdef CONFIG_PAX_SEGMEXEC
61475+ pax_mirror_anon_pte(vma, address, new_page, ptl);
61476+#endif
61477+
61478 /* Free the old page.. */
61479 new_page = old_page;
61480 ret |= VM_FAULT_WRITE;
61481@@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61482 swap_free(entry);
61483 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61484 try_to_free_swap(page);
61485+
61486+#ifdef CONFIG_PAX_SEGMEXEC
61487+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61488+#endif
61489+
61490 unlock_page(page);
61491 if (swapcache) {
61492 /*
61493@@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61494
61495 /* No need to invalidate - it was non-present before */
61496 update_mmu_cache(vma, address, page_table);
61497+
61498+#ifdef CONFIG_PAX_SEGMEXEC
61499+ pax_mirror_anon_pte(vma, address, page, ptl);
61500+#endif
61501+
61502 unlock:
61503 pte_unmap_unlock(page_table, ptl);
61504 out:
61505@@ -2947,40 +3160,6 @@ out_release:
61506 }
61507
61508 /*
61509- * This is like a special single-page "expand_{down|up}wards()",
61510- * except we must first make sure that 'address{-|+}PAGE_SIZE'
61511- * doesn't hit another vma.
61512- */
61513-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61514-{
61515- address &= PAGE_MASK;
61516- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61517- struct vm_area_struct *prev = vma->vm_prev;
61518-
61519- /*
61520- * Is there a mapping abutting this one below?
61521- *
61522- * That's only ok if it's the same stack mapping
61523- * that has gotten split..
61524- */
61525- if (prev && prev->vm_end == address)
61526- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61527-
61528- expand_stack(vma, address - PAGE_SIZE);
61529- }
61530- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61531- struct vm_area_struct *next = vma->vm_next;
61532-
61533- /* As VM_GROWSDOWN but s/below/above/ */
61534- if (next && next->vm_start == address + PAGE_SIZE)
61535- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61536-
61537- expand_upwards(vma, address + PAGE_SIZE);
61538- }
61539- return 0;
61540-}
61541-
61542-/*
61543 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61544 * but allow concurrent faults), and pte mapped but not yet locked.
61545 * We return with mmap_sem still held, but pte unmapped and unlocked.
61546@@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61547 unsigned long address, pte_t *page_table, pmd_t *pmd,
61548 unsigned int flags)
61549 {
61550- struct page *page;
61551+ struct page *page = NULL;
61552 spinlock_t *ptl;
61553 pte_t entry;
61554
61555- pte_unmap(page_table);
61556-
61557- /* Check if we need to add a guard page to the stack */
61558- if (check_stack_guard_page(vma, address) < 0)
61559- return VM_FAULT_SIGBUS;
61560-
61561- /* Use the zero-page for reads */
61562 if (!(flags & FAULT_FLAG_WRITE)) {
61563 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61564 vma->vm_page_prot));
61565- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61566+ ptl = pte_lockptr(mm, pmd);
61567+ spin_lock(ptl);
61568 if (!pte_none(*page_table))
61569 goto unlock;
61570 goto setpte;
61571 }
61572
61573 /* Allocate our own private page. */
61574+ pte_unmap(page_table);
61575+
61576 if (unlikely(anon_vma_prepare(vma)))
61577 goto oom;
61578 page = alloc_zeroed_user_highpage_movable(vma, address);
61579@@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61580 if (!pte_none(*page_table))
61581 goto release;
61582
61583+#ifdef CONFIG_PAX_SEGMEXEC
61584+ if (pax_find_mirror_vma(vma))
61585+ BUG_ON(!trylock_page(page));
61586+#endif
61587+
61588 inc_mm_counter_fast(mm, MM_ANONPAGES);
61589 page_add_new_anon_rmap(page, vma, address);
61590 setpte:
61591@@ -3035,6 +3215,12 @@ setpte:
61592
61593 /* No need to invalidate - it was non-present before */
61594 update_mmu_cache(vma, address, page_table);
61595+
61596+#ifdef CONFIG_PAX_SEGMEXEC
61597+ if (page)
61598+ pax_mirror_anon_pte(vma, address, page, ptl);
61599+#endif
61600+
61601 unlock:
61602 pte_unmap_unlock(page_table, ptl);
61603 return 0;
61604@@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61605 */
61606 /* Only go through if we didn't race with anybody else... */
61607 if (likely(pte_same(*page_table, orig_pte))) {
61608+
61609+#ifdef CONFIG_PAX_SEGMEXEC
61610+ if (anon && pax_find_mirror_vma(vma))
61611+ BUG_ON(!trylock_page(page));
61612+#endif
61613+
61614 flush_icache_page(vma, page);
61615 entry = mk_pte(page, vma->vm_page_prot);
61616 if (flags & FAULT_FLAG_WRITE)
61617@@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61618
61619 /* no need to invalidate: a not-present page won't be cached */
61620 update_mmu_cache(vma, address, page_table);
61621+
61622+#ifdef CONFIG_PAX_SEGMEXEC
61623+ if (anon)
61624+ pax_mirror_anon_pte(vma, address, page, ptl);
61625+ else
61626+ pax_mirror_file_pte(vma, address, page, ptl);
61627+#endif
61628+
61629 } else {
61630 if (charged)
61631 mem_cgroup_uncharge_page(page);
61632@@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61633 if (flags & FAULT_FLAG_WRITE)
61634 flush_tlb_fix_spurious_fault(vma, address);
61635 }
61636+
61637+#ifdef CONFIG_PAX_SEGMEXEC
61638+ pax_mirror_pte(vma, address, pte, pmd, ptl);
61639+ return 0;
61640+#endif
61641+
61642 unlock:
61643 pte_unmap_unlock(pte, ptl);
61644 return 0;
61645@@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61646 pmd_t *pmd;
61647 pte_t *pte;
61648
61649+#ifdef CONFIG_PAX_SEGMEXEC
61650+ struct vm_area_struct *vma_m;
61651+#endif
61652+
61653 __set_current_state(TASK_RUNNING);
61654
61655 count_vm_event(PGFAULT);
61656@@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61657 if (unlikely(is_vm_hugetlb_page(vma)))
61658 return hugetlb_fault(mm, vma, address, flags);
61659
61660+#ifdef CONFIG_PAX_SEGMEXEC
61661+ vma_m = pax_find_mirror_vma(vma);
61662+ if (vma_m) {
61663+ unsigned long address_m;
61664+ pgd_t *pgd_m;
61665+ pud_t *pud_m;
61666+ pmd_t *pmd_m;
61667+
61668+ if (vma->vm_start > vma_m->vm_start) {
61669+ address_m = address;
61670+ address -= SEGMEXEC_TASK_SIZE;
61671+ vma = vma_m;
61672+ } else
61673+ address_m = address + SEGMEXEC_TASK_SIZE;
61674+
61675+ pgd_m = pgd_offset(mm, address_m);
61676+ pud_m = pud_alloc(mm, pgd_m, address_m);
61677+ if (!pud_m)
61678+ return VM_FAULT_OOM;
61679+ pmd_m = pmd_alloc(mm, pud_m, address_m);
61680+ if (!pmd_m)
61681+ return VM_FAULT_OOM;
61682+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61683+ return VM_FAULT_OOM;
61684+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61685+ }
61686+#endif
61687+
61688 pgd = pgd_offset(mm, address);
61689 pud = pud_alloc(mm, pgd, address);
61690 if (!pud)
61691@@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61692 * run pte_offset_map on the pmd, if an huge pmd could
61693 * materialize from under us from a different thread.
61694 */
61695- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61696+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61697 return VM_FAULT_OOM;
61698 /* if an huge pmd materialized from under us just retry later */
61699 if (unlikely(pmd_trans_huge(*pmd)))
61700@@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61701 gate_vma.vm_start = FIXADDR_USER_START;
61702 gate_vma.vm_end = FIXADDR_USER_END;
61703 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61704- gate_vma.vm_page_prot = __P101;
61705+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61706 /*
61707 * Make sure the vDSO gets into every core dump.
61708 * Dumping its contents makes post-mortem fully interpretable later
61709diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61710--- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61711+++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61712@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61713
61714 int sysctl_memory_failure_recovery __read_mostly = 1;
61715
61716-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61717+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61718
61719 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61720
61721@@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61722 }
61723
61724 nr_pages = 1 << compound_trans_order(hpage);
61725- atomic_long_add(nr_pages, &mce_bad_pages);
61726+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61727
61728 /*
61729 * We need/can do nothing about count=0 pages.
61730@@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61731 if (!PageHWPoison(hpage)
61732 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61733 || (p != hpage && TestSetPageHWPoison(hpage))) {
61734- atomic_long_sub(nr_pages, &mce_bad_pages);
61735+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61736 return 0;
61737 }
61738 set_page_hwpoison_huge_page(hpage);
61739@@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61740 }
61741 if (hwpoison_filter(p)) {
61742 if (TestClearPageHWPoison(p))
61743- atomic_long_sub(nr_pages, &mce_bad_pages);
61744+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61745 unlock_page(hpage);
61746 put_page(hpage);
61747 return 0;
61748@@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61749 return 0;
61750 }
61751 if (TestClearPageHWPoison(p))
61752- atomic_long_sub(nr_pages, &mce_bad_pages);
61753+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61754 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61755 return 0;
61756 }
61757@@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61758 */
61759 if (TestClearPageHWPoison(page)) {
61760 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61761- atomic_long_sub(nr_pages, &mce_bad_pages);
61762+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61763 freeit = 1;
61764 if (PageHuge(page))
61765 clear_page_hwpoison_huge_page(page);
61766@@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61767 }
61768 done:
61769 if (!PageHWPoison(hpage))
61770- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61771+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61772 set_page_hwpoison_huge_page(hpage);
61773 dequeue_hwpoisoned_huge_page(hpage);
61774 /* keep elevated page count for bad page */
61775@@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61776 return ret;
61777
61778 done:
61779- atomic_long_add(1, &mce_bad_pages);
61780+ atomic_long_add_unchecked(1, &mce_bad_pages);
61781 SetPageHWPoison(page);
61782 /* keep elevated page count for bad page */
61783 return ret;
61784diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61785--- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61786+++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61787@@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61788 unsigned long vmstart;
61789 unsigned long vmend;
61790
61791+#ifdef CONFIG_PAX_SEGMEXEC
61792+ struct vm_area_struct *vma_m;
61793+#endif
61794+
61795 vma = find_vma_prev(mm, start, &prev);
61796 if (!vma || vma->vm_start > start)
61797 return -EFAULT;
61798@@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61799 err = policy_vma(vma, new_pol);
61800 if (err)
61801 goto out;
61802+
61803+#ifdef CONFIG_PAX_SEGMEXEC
61804+ vma_m = pax_find_mirror_vma(vma);
61805+ if (vma_m) {
61806+ err = policy_vma(vma_m, new_pol);
61807+ if (err)
61808+ goto out;
61809+ }
61810+#endif
61811+
61812 }
61813
61814 out:
61815@@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61816
61817 if (end < start)
61818 return -EINVAL;
61819+
61820+#ifdef CONFIG_PAX_SEGMEXEC
61821+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61822+ if (end > SEGMEXEC_TASK_SIZE)
61823+ return -EINVAL;
61824+ } else
61825+#endif
61826+
61827+ if (end > TASK_SIZE)
61828+ return -EINVAL;
61829+
61830 if (end == start)
61831 return 0;
61832
61833@@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61834 if (!mm)
61835 goto out;
61836
61837+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61838+ if (mm != current->mm &&
61839+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61840+ err = -EPERM;
61841+ goto out;
61842+ }
61843+#endif
61844+
61845 /*
61846 * Check if this process has the right to modify the specified
61847 * process. The right exists if the process has administrative
61848@@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61849 rcu_read_lock();
61850 tcred = __task_cred(task);
61851 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61852- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61853- !capable(CAP_SYS_NICE)) {
61854+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61855 rcu_read_unlock();
61856 err = -EPERM;
61857 goto out;
61858@@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61859
61860 if (file) {
61861 seq_printf(m, " file=");
61862- seq_path(m, &file->f_path, "\n\t= ");
61863+ seq_path(m, &file->f_path, "\n\t\\= ");
61864 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61865 seq_printf(m, " heap");
61866 } else if (vma->vm_start <= mm->start_stack &&
61867diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61868--- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61869+++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61870@@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61871 unsigned long chunk_start;
61872 int err;
61873
61874+ pax_track_stack();
61875+
61876 task_nodes = cpuset_mems_allowed(task);
61877
61878 err = -ENOMEM;
61879@@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61880 if (!mm)
61881 return -EINVAL;
61882
61883+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61884+ if (mm != current->mm &&
61885+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61886+ err = -EPERM;
61887+ goto out;
61888+ }
61889+#endif
61890+
61891 /*
61892 * Check if this process has the right to modify the specified
61893 * process. The right exists if the process has administrative
61894@@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61895 rcu_read_lock();
61896 tcred = __task_cred(task);
61897 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61898- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61899- !capable(CAP_SYS_NICE)) {
61900+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61901 rcu_read_unlock();
61902 err = -EPERM;
61903 goto out;
61904diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61905--- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61906+++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61907@@ -13,6 +13,7 @@
61908 #include <linux/pagemap.h>
61909 #include <linux/mempolicy.h>
61910 #include <linux/syscalls.h>
61911+#include <linux/security.h>
61912 #include <linux/sched.h>
61913 #include <linux/module.h>
61914 #include <linux/rmap.h>
61915@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61916 return -EINVAL;
61917 if (end == start)
61918 return 0;
61919+ if (end > TASK_SIZE)
61920+ return -EINVAL;
61921+
61922 vma = find_vma_prev(current->mm, start, &prev);
61923 if (!vma || vma->vm_start > start)
61924 return -ENOMEM;
61925@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61926 for (nstart = start ; ; ) {
61927 unsigned int newflags;
61928
61929+#ifdef CONFIG_PAX_SEGMEXEC
61930+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61931+ break;
61932+#endif
61933+
61934 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61935
61936 newflags = vma->vm_flags | VM_LOCKED;
61937@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61938 lock_limit >>= PAGE_SHIFT;
61939
61940 /* check against resource limits */
61941+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61942 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61943 error = do_mlock(start, len, 1);
61944 up_write(&current->mm->mmap_sem);
61945@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61946 static int do_mlockall(int flags)
61947 {
61948 struct vm_area_struct * vma, * prev = NULL;
61949- unsigned int def_flags = 0;
61950
61951 if (flags & MCL_FUTURE)
61952- def_flags = VM_LOCKED;
61953- current->mm->def_flags = def_flags;
61954+ current->mm->def_flags |= VM_LOCKED;
61955+ else
61956+ current->mm->def_flags &= ~VM_LOCKED;
61957 if (flags == MCL_FUTURE)
61958 goto out;
61959
61960 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61961- unsigned int newflags;
61962+ unsigned long newflags;
61963+
61964+#ifdef CONFIG_PAX_SEGMEXEC
61965+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61966+ break;
61967+#endif
61968
61969+ BUG_ON(vma->vm_end > TASK_SIZE);
61970 newflags = vma->vm_flags | VM_LOCKED;
61971 if (!(flags & MCL_CURRENT))
61972 newflags &= ~VM_LOCKED;
61973@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61974 lock_limit >>= PAGE_SHIFT;
61975
61976 ret = -ENOMEM;
61977+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61978 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61979 capable(CAP_IPC_LOCK))
61980 ret = do_mlockall(flags);
61981diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
61982--- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
61983+++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
61984@@ -46,6 +46,16 @@
61985 #define arch_rebalance_pgtables(addr, len) (addr)
61986 #endif
61987
61988+static inline void verify_mm_writelocked(struct mm_struct *mm)
61989+{
61990+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61991+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61992+ up_read(&mm->mmap_sem);
61993+ BUG();
61994+ }
61995+#endif
61996+}
61997+
61998 static void unmap_region(struct mm_struct *mm,
61999 struct vm_area_struct *vma, struct vm_area_struct *prev,
62000 unsigned long start, unsigned long end);
62001@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
62002 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
62003 *
62004 */
62005-pgprot_t protection_map[16] = {
62006+pgprot_t protection_map[16] __read_only = {
62007 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
62008 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
62009 };
62010
62011 pgprot_t vm_get_page_prot(unsigned long vm_flags)
62012 {
62013- return __pgprot(pgprot_val(protection_map[vm_flags &
62014+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
62015 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
62016 pgprot_val(arch_vm_get_page_prot(vm_flags)));
62017+
62018+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62019+ if (!(__supported_pte_mask & _PAGE_NX) &&
62020+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
62021+ (vm_flags & (VM_READ | VM_WRITE)))
62022+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
62023+#endif
62024+
62025+ return prot;
62026 }
62027 EXPORT_SYMBOL(vm_get_page_prot);
62028
62029 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
62030 int sysctl_overcommit_ratio = 50; /* default is 50% */
62031 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
62032+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
62033 struct percpu_counter vm_committed_as;
62034
62035 /*
62036@@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
62037 struct vm_area_struct *next = vma->vm_next;
62038
62039 might_sleep();
62040+ BUG_ON(vma->vm_mirror);
62041 if (vma->vm_ops && vma->vm_ops->close)
62042 vma->vm_ops->close(vma);
62043 if (vma->vm_file) {
62044@@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
62045 * not page aligned -Ram Gupta
62046 */
62047 rlim = rlimit(RLIMIT_DATA);
62048+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
62049 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
62050 (mm->end_data - mm->start_data) > rlim)
62051 goto out;
62052@@ -719,6 +741,12 @@ static int
62053 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
62054 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62055 {
62056+
62057+#ifdef CONFIG_PAX_SEGMEXEC
62058+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
62059+ return 0;
62060+#endif
62061+
62062 if (is_mergeable_vma(vma, file, vm_flags) &&
62063 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62064 if (vma->vm_pgoff == vm_pgoff)
62065@@ -738,6 +766,12 @@ static int
62066 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
62067 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62068 {
62069+
62070+#ifdef CONFIG_PAX_SEGMEXEC
62071+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
62072+ return 0;
62073+#endif
62074+
62075 if (is_mergeable_vma(vma, file, vm_flags) &&
62076 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62077 pgoff_t vm_pglen;
62078@@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
62079 struct vm_area_struct *vma_merge(struct mm_struct *mm,
62080 struct vm_area_struct *prev, unsigned long addr,
62081 unsigned long end, unsigned long vm_flags,
62082- struct anon_vma *anon_vma, struct file *file,
62083+ struct anon_vma *anon_vma, struct file *file,
62084 pgoff_t pgoff, struct mempolicy *policy)
62085 {
62086 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
62087 struct vm_area_struct *area, *next;
62088 int err;
62089
62090+#ifdef CONFIG_PAX_SEGMEXEC
62091+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
62092+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
62093+
62094+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
62095+#endif
62096+
62097 /*
62098 * We later require that vma->vm_flags == vm_flags,
62099 * so this tests vma->vm_flags & VM_SPECIAL, too.
62100@@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
62101 if (next && next->vm_end == end) /* cases 6, 7, 8 */
62102 next = next->vm_next;
62103
62104+#ifdef CONFIG_PAX_SEGMEXEC
62105+ if (prev)
62106+ prev_m = pax_find_mirror_vma(prev);
62107+ if (area)
62108+ area_m = pax_find_mirror_vma(area);
62109+ if (next)
62110+ next_m = pax_find_mirror_vma(next);
62111+#endif
62112+
62113 /*
62114 * Can it merge with the predecessor?
62115 */
62116@@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
62117 /* cases 1, 6 */
62118 err = vma_adjust(prev, prev->vm_start,
62119 next->vm_end, prev->vm_pgoff, NULL);
62120- } else /* cases 2, 5, 7 */
62121+
62122+#ifdef CONFIG_PAX_SEGMEXEC
62123+ if (!err && prev_m)
62124+ err = vma_adjust(prev_m, prev_m->vm_start,
62125+ next_m->vm_end, prev_m->vm_pgoff, NULL);
62126+#endif
62127+
62128+ } else { /* cases 2, 5, 7 */
62129 err = vma_adjust(prev, prev->vm_start,
62130 end, prev->vm_pgoff, NULL);
62131+
62132+#ifdef CONFIG_PAX_SEGMEXEC
62133+ if (!err && prev_m)
62134+ err = vma_adjust(prev_m, prev_m->vm_start,
62135+ end_m, prev_m->vm_pgoff, NULL);
62136+#endif
62137+
62138+ }
62139 if (err)
62140 return NULL;
62141 khugepaged_enter_vma_merge(prev);
62142@@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
62143 mpol_equal(policy, vma_policy(next)) &&
62144 can_vma_merge_before(next, vm_flags,
62145 anon_vma, file, pgoff+pglen)) {
62146- if (prev && addr < prev->vm_end) /* case 4 */
62147+ if (prev && addr < prev->vm_end) { /* case 4 */
62148 err = vma_adjust(prev, prev->vm_start,
62149 addr, prev->vm_pgoff, NULL);
62150- else /* cases 3, 8 */
62151+
62152+#ifdef CONFIG_PAX_SEGMEXEC
62153+ if (!err && prev_m)
62154+ err = vma_adjust(prev_m, prev_m->vm_start,
62155+ addr_m, prev_m->vm_pgoff, NULL);
62156+#endif
62157+
62158+ } else { /* cases 3, 8 */
62159 err = vma_adjust(area, addr, next->vm_end,
62160 next->vm_pgoff - pglen, NULL);
62161+
62162+#ifdef CONFIG_PAX_SEGMEXEC
62163+ if (!err && area_m)
62164+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
62165+ next_m->vm_pgoff - pglen, NULL);
62166+#endif
62167+
62168+ }
62169 if (err)
62170 return NULL;
62171 khugepaged_enter_vma_merge(area);
62172@@ -958,14 +1038,11 @@ none:
62173 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62174 struct file *file, long pages)
62175 {
62176- const unsigned long stack_flags
62177- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62178-
62179 if (file) {
62180 mm->shared_vm += pages;
62181 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62182 mm->exec_vm += pages;
62183- } else if (flags & stack_flags)
62184+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62185 mm->stack_vm += pages;
62186 if (flags & (VM_RESERVED|VM_IO))
62187 mm->reserved_vm += pages;
62188@@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62189 * (the exception is when the underlying filesystem is noexec
62190 * mounted, in which case we dont add PROT_EXEC.)
62191 */
62192- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62193+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62194 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62195 prot |= PROT_EXEC;
62196
62197@@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62198 /* Obtain the address to map to. we verify (or select) it and ensure
62199 * that it represents a valid section of the address space.
62200 */
62201- addr = get_unmapped_area(file, addr, len, pgoff, flags);
62202+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62203 if (addr & ~PAGE_MASK)
62204 return addr;
62205
62206@@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62207 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62208 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62209
62210+#ifdef CONFIG_PAX_MPROTECT
62211+ if (mm->pax_flags & MF_PAX_MPROTECT) {
62212+#ifndef CONFIG_PAX_MPROTECT_COMPAT
62213+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62214+ gr_log_rwxmmap(file);
62215+
62216+#ifdef CONFIG_PAX_EMUPLT
62217+ vm_flags &= ~VM_EXEC;
62218+#else
62219+ return -EPERM;
62220+#endif
62221+
62222+ }
62223+
62224+ if (!(vm_flags & VM_EXEC))
62225+ vm_flags &= ~VM_MAYEXEC;
62226+#else
62227+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62228+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62229+#endif
62230+ else
62231+ vm_flags &= ~VM_MAYWRITE;
62232+ }
62233+#endif
62234+
62235+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62236+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62237+ vm_flags &= ~VM_PAGEEXEC;
62238+#endif
62239+
62240 if (flags & MAP_LOCKED)
62241 if (!can_do_mlock())
62242 return -EPERM;
62243@@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62244 locked += mm->locked_vm;
62245 lock_limit = rlimit(RLIMIT_MEMLOCK);
62246 lock_limit >>= PAGE_SHIFT;
62247+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62248 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62249 return -EAGAIN;
62250 }
62251@@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62252 if (error)
62253 return error;
62254
62255+ if (!gr_acl_handle_mmap(file, prot))
62256+ return -EACCES;
62257+
62258 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62259 }
62260 EXPORT_SYMBOL(do_mmap_pgoff);
62261@@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62262 */
62263 int vma_wants_writenotify(struct vm_area_struct *vma)
62264 {
62265- unsigned int vm_flags = vma->vm_flags;
62266+ unsigned long vm_flags = vma->vm_flags;
62267
62268 /* If it was private or non-writable, the write bit is already clear */
62269- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62270+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62271 return 0;
62272
62273 /* The backer wishes to know when pages are first written to? */
62274@@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62275 unsigned long charged = 0;
62276 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62277
62278+#ifdef CONFIG_PAX_SEGMEXEC
62279+ struct vm_area_struct *vma_m = NULL;
62280+#endif
62281+
62282+ /*
62283+ * mm->mmap_sem is required to protect against another thread
62284+ * changing the mappings in case we sleep.
62285+ */
62286+ verify_mm_writelocked(mm);
62287+
62288 /* Clear old maps */
62289 error = -ENOMEM;
62290-munmap_back:
62291 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62292 if (vma && vma->vm_start < addr + len) {
62293 if (do_munmap(mm, addr, len))
62294 return -ENOMEM;
62295- goto munmap_back;
62296+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62297+ BUG_ON(vma && vma->vm_start < addr + len);
62298 }
62299
62300 /* Check against address space limit. */
62301@@ -1295,6 +1416,16 @@ munmap_back:
62302 goto unacct_error;
62303 }
62304
62305+#ifdef CONFIG_PAX_SEGMEXEC
62306+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62307+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62308+ if (!vma_m) {
62309+ error = -ENOMEM;
62310+ goto free_vma;
62311+ }
62312+ }
62313+#endif
62314+
62315 vma->vm_mm = mm;
62316 vma->vm_start = addr;
62317 vma->vm_end = addr + len;
62318@@ -1318,6 +1449,19 @@ munmap_back:
62319 error = file->f_op->mmap(file, vma);
62320 if (error)
62321 goto unmap_and_free_vma;
62322+
62323+#ifdef CONFIG_PAX_SEGMEXEC
62324+ if (vma_m && (vm_flags & VM_EXECUTABLE))
62325+ added_exe_file_vma(mm);
62326+#endif
62327+
62328+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62329+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62330+ vma->vm_flags |= VM_PAGEEXEC;
62331+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62332+ }
62333+#endif
62334+
62335 if (vm_flags & VM_EXECUTABLE)
62336 added_exe_file_vma(mm);
62337
62338@@ -1353,6 +1497,11 @@ munmap_back:
62339 vma_link(mm, vma, prev, rb_link, rb_parent);
62340 file = vma->vm_file;
62341
62342+#ifdef CONFIG_PAX_SEGMEXEC
62343+ if (vma_m)
62344+ BUG_ON(pax_mirror_vma(vma_m, vma));
62345+#endif
62346+
62347 /* Once vma denies write, undo our temporary denial count */
62348 if (correct_wcount)
62349 atomic_inc(&inode->i_writecount);
62350@@ -1361,6 +1510,7 @@ out:
62351
62352 mm->total_vm += len >> PAGE_SHIFT;
62353 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62354+ track_exec_limit(mm, addr, addr + len, vm_flags);
62355 if (vm_flags & VM_LOCKED) {
62356 if (!mlock_vma_pages_range(vma, addr, addr + len))
62357 mm->locked_vm += (len >> PAGE_SHIFT);
62358@@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62359 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62360 charged = 0;
62361 free_vma:
62362+
62363+#ifdef CONFIG_PAX_SEGMEXEC
62364+ if (vma_m)
62365+ kmem_cache_free(vm_area_cachep, vma_m);
62366+#endif
62367+
62368 kmem_cache_free(vm_area_cachep, vma);
62369 unacct_error:
62370 if (charged)
62371@@ -1385,6 +1541,44 @@ unacct_error:
62372 return error;
62373 }
62374
62375+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62376+{
62377+ if (!vma) {
62378+#ifdef CONFIG_STACK_GROWSUP
62379+ if (addr > sysctl_heap_stack_gap)
62380+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62381+ else
62382+ vma = find_vma(current->mm, 0);
62383+ if (vma && (vma->vm_flags & VM_GROWSUP))
62384+ return false;
62385+#endif
62386+ return true;
62387+ }
62388+
62389+ if (addr + len > vma->vm_start)
62390+ return false;
62391+
62392+ if (vma->vm_flags & VM_GROWSDOWN)
62393+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62394+#ifdef CONFIG_STACK_GROWSUP
62395+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62396+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62397+#endif
62398+
62399+ return true;
62400+}
62401+
62402+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62403+{
62404+ if (vma->vm_start < len)
62405+ return -ENOMEM;
62406+ if (!(vma->vm_flags & VM_GROWSDOWN))
62407+ return vma->vm_start - len;
62408+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
62409+ return vma->vm_start - len - sysctl_heap_stack_gap;
62410+ return -ENOMEM;
62411+}
62412+
62413 /* Get an address range which is currently unmapped.
62414 * For shmat() with addr=0.
62415 *
62416@@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62417 if (flags & MAP_FIXED)
62418 return addr;
62419
62420+#ifdef CONFIG_PAX_RANDMMAP
62421+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62422+#endif
62423+
62424 if (addr) {
62425 addr = PAGE_ALIGN(addr);
62426- vma = find_vma(mm, addr);
62427- if (TASK_SIZE - len >= addr &&
62428- (!vma || addr + len <= vma->vm_start))
62429- return addr;
62430+ if (TASK_SIZE - len >= addr) {
62431+ vma = find_vma(mm, addr);
62432+ if (check_heap_stack_gap(vma, addr, len))
62433+ return addr;
62434+ }
62435 }
62436 if (len > mm->cached_hole_size) {
62437- start_addr = addr = mm->free_area_cache;
62438+ start_addr = addr = mm->free_area_cache;
62439 } else {
62440- start_addr = addr = TASK_UNMAPPED_BASE;
62441- mm->cached_hole_size = 0;
62442+ start_addr = addr = mm->mmap_base;
62443+ mm->cached_hole_size = 0;
62444 }
62445
62446 full_search:
62447@@ -1433,34 +1632,40 @@ full_search:
62448 * Start a new search - just in case we missed
62449 * some holes.
62450 */
62451- if (start_addr != TASK_UNMAPPED_BASE) {
62452- addr = TASK_UNMAPPED_BASE;
62453- start_addr = addr;
62454+ if (start_addr != mm->mmap_base) {
62455+ start_addr = addr = mm->mmap_base;
62456 mm->cached_hole_size = 0;
62457 goto full_search;
62458 }
62459 return -ENOMEM;
62460 }
62461- if (!vma || addr + len <= vma->vm_start) {
62462- /*
62463- * Remember the place where we stopped the search:
62464- */
62465- mm->free_area_cache = addr + len;
62466- return addr;
62467- }
62468+ if (check_heap_stack_gap(vma, addr, len))
62469+ break;
62470 if (addr + mm->cached_hole_size < vma->vm_start)
62471 mm->cached_hole_size = vma->vm_start - addr;
62472 addr = vma->vm_end;
62473 }
62474+
62475+ /*
62476+ * Remember the place where we stopped the search:
62477+ */
62478+ mm->free_area_cache = addr + len;
62479+ return addr;
62480 }
62481 #endif
62482
62483 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62484 {
62485+
62486+#ifdef CONFIG_PAX_SEGMEXEC
62487+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62488+ return;
62489+#endif
62490+
62491 /*
62492 * Is this a new hole at the lowest possible address?
62493 */
62494- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62495+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62496 mm->free_area_cache = addr;
62497 mm->cached_hole_size = ~0UL;
62498 }
62499@@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62500 {
62501 struct vm_area_struct *vma;
62502 struct mm_struct *mm = current->mm;
62503- unsigned long addr = addr0;
62504+ unsigned long base = mm->mmap_base, addr = addr0;
62505
62506 /* requested length too big for entire address space */
62507 if (len > TASK_SIZE)
62508@@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62509 if (flags & MAP_FIXED)
62510 return addr;
62511
62512+#ifdef CONFIG_PAX_RANDMMAP
62513+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62514+#endif
62515+
62516 /* requesting a specific address */
62517 if (addr) {
62518 addr = PAGE_ALIGN(addr);
62519- vma = find_vma(mm, addr);
62520- if (TASK_SIZE - len >= addr &&
62521- (!vma || addr + len <= vma->vm_start))
62522- return addr;
62523+ if (TASK_SIZE - len >= addr) {
62524+ vma = find_vma(mm, addr);
62525+ if (check_heap_stack_gap(vma, addr, len))
62526+ return addr;
62527+ }
62528 }
62529
62530 /* check if free_area_cache is useful for us */
62531@@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62532 /* make sure it can fit in the remaining address space */
62533 if (addr > len) {
62534 vma = find_vma(mm, addr-len);
62535- if (!vma || addr <= vma->vm_start)
62536+ if (check_heap_stack_gap(vma, addr - len, len))
62537 /* remember the address as a hint for next time */
62538 return (mm->free_area_cache = addr-len);
62539 }
62540@@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62541 * return with success:
62542 */
62543 vma = find_vma(mm, addr);
62544- if (!vma || addr+len <= vma->vm_start)
62545+ if (check_heap_stack_gap(vma, addr, len))
62546 /* remember the address as a hint for next time */
62547 return (mm->free_area_cache = addr);
62548
62549@@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62550 mm->cached_hole_size = vma->vm_start - addr;
62551
62552 /* try just below the current vma->vm_start */
62553- addr = vma->vm_start-len;
62554- } while (len < vma->vm_start);
62555+ addr = skip_heap_stack_gap(vma, len);
62556+ } while (!IS_ERR_VALUE(addr));
62557
62558 bottomup:
62559 /*
62560@@ -1544,13 +1754,21 @@ bottomup:
62561 * can happen with large stack limits and large mmap()
62562 * allocations.
62563 */
62564+ mm->mmap_base = TASK_UNMAPPED_BASE;
62565+
62566+#ifdef CONFIG_PAX_RANDMMAP
62567+ if (mm->pax_flags & MF_PAX_RANDMMAP)
62568+ mm->mmap_base += mm->delta_mmap;
62569+#endif
62570+
62571+ mm->free_area_cache = mm->mmap_base;
62572 mm->cached_hole_size = ~0UL;
62573- mm->free_area_cache = TASK_UNMAPPED_BASE;
62574 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62575 /*
62576 * Restore the topdown base:
62577 */
62578- mm->free_area_cache = mm->mmap_base;
62579+ mm->mmap_base = base;
62580+ mm->free_area_cache = base;
62581 mm->cached_hole_size = ~0UL;
62582
62583 return addr;
62584@@ -1559,6 +1777,12 @@ bottomup:
62585
62586 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62587 {
62588+
62589+#ifdef CONFIG_PAX_SEGMEXEC
62590+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62591+ return;
62592+#endif
62593+
62594 /*
62595 * Is this a new hole at the highest possible address?
62596 */
62597@@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62598 mm->free_area_cache = addr;
62599
62600 /* dont allow allocations above current base */
62601- if (mm->free_area_cache > mm->mmap_base)
62602+ if (mm->free_area_cache > mm->mmap_base) {
62603 mm->free_area_cache = mm->mmap_base;
62604+ mm->cached_hole_size = ~0UL;
62605+ }
62606 }
62607
62608 unsigned long
62609@@ -1675,6 +1901,28 @@ out:
62610 return prev ? prev->vm_next : vma;
62611 }
62612
62613+#ifdef CONFIG_PAX_SEGMEXEC
62614+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62615+{
62616+ struct vm_area_struct *vma_m;
62617+
62618+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62619+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62620+ BUG_ON(vma->vm_mirror);
62621+ return NULL;
62622+ }
62623+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62624+ vma_m = vma->vm_mirror;
62625+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62626+ BUG_ON(vma->vm_file != vma_m->vm_file);
62627+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62628+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62629+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62630+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62631+ return vma_m;
62632+}
62633+#endif
62634+
62635 /*
62636 * Verify that the stack growth is acceptable and
62637 * update accounting. This is shared with both the
62638@@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62639 return -ENOMEM;
62640
62641 /* Stack limit test */
62642+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
62643 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62644 return -ENOMEM;
62645
62646@@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62647 locked = mm->locked_vm + grow;
62648 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62649 limit >>= PAGE_SHIFT;
62650+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62651 if (locked > limit && !capable(CAP_IPC_LOCK))
62652 return -ENOMEM;
62653 }
62654@@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62655 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62656 * vma is the last one with address > vma->vm_end. Have to extend vma.
62657 */
62658+#ifndef CONFIG_IA64
62659+static
62660+#endif
62661 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62662 {
62663 int error;
62664+ bool locknext;
62665
62666 if (!(vma->vm_flags & VM_GROWSUP))
62667 return -EFAULT;
62668
62669+ /* Also guard against wrapping around to address 0. */
62670+ if (address < PAGE_ALIGN(address+1))
62671+ address = PAGE_ALIGN(address+1);
62672+ else
62673+ return -ENOMEM;
62674+
62675 /*
62676 * We must make sure the anon_vma is allocated
62677 * so that the anon_vma locking is not a noop.
62678 */
62679 if (unlikely(anon_vma_prepare(vma)))
62680 return -ENOMEM;
62681+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62682+ if (locknext && anon_vma_prepare(vma->vm_next))
62683+ return -ENOMEM;
62684 vma_lock_anon_vma(vma);
62685+ if (locknext)
62686+ vma_lock_anon_vma(vma->vm_next);
62687
62688 /*
62689 * vma->vm_start/vm_end cannot change under us because the caller
62690 * is required to hold the mmap_sem in read mode. We need the
62691- * anon_vma lock to serialize against concurrent expand_stacks.
62692- * Also guard against wrapping around to address 0.
62693+ * anon_vma locks to serialize against concurrent expand_stacks
62694+ * and expand_upwards.
62695 */
62696- if (address < PAGE_ALIGN(address+4))
62697- address = PAGE_ALIGN(address+4);
62698- else {
62699- vma_unlock_anon_vma(vma);
62700- return -ENOMEM;
62701- }
62702 error = 0;
62703
62704 /* Somebody else might have raced and expanded it already */
62705- if (address > vma->vm_end) {
62706+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62707+ error = -ENOMEM;
62708+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62709 unsigned long size, grow;
62710
62711 size = address - vma->vm_start;
62712@@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62713 }
62714 }
62715 }
62716+ if (locknext)
62717+ vma_unlock_anon_vma(vma->vm_next);
62718 vma_unlock_anon_vma(vma);
62719 khugepaged_enter_vma_merge(vma);
62720 return error;
62721@@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62722 unsigned long address)
62723 {
62724 int error;
62725+ bool lockprev = false;
62726+ struct vm_area_struct *prev;
62727
62728 /*
62729 * We must make sure the anon_vma is allocated
62730@@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62731 if (error)
62732 return error;
62733
62734+ prev = vma->vm_prev;
62735+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62736+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62737+#endif
62738+ if (lockprev && anon_vma_prepare(prev))
62739+ return -ENOMEM;
62740+ if (lockprev)
62741+ vma_lock_anon_vma(prev);
62742+
62743 vma_lock_anon_vma(vma);
62744
62745 /*
62746@@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62747 */
62748
62749 /* Somebody else might have raced and expanded it already */
62750- if (address < vma->vm_start) {
62751+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62752+ error = -ENOMEM;
62753+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62754 unsigned long size, grow;
62755
62756+#ifdef CONFIG_PAX_SEGMEXEC
62757+ struct vm_area_struct *vma_m;
62758+
62759+ vma_m = pax_find_mirror_vma(vma);
62760+#endif
62761+
62762 size = vma->vm_end - address;
62763 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62764
62765@@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62766 if (!error) {
62767 vma->vm_start = address;
62768 vma->vm_pgoff -= grow;
62769+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62770+
62771+#ifdef CONFIG_PAX_SEGMEXEC
62772+ if (vma_m) {
62773+ vma_m->vm_start -= grow << PAGE_SHIFT;
62774+ vma_m->vm_pgoff -= grow;
62775+ }
62776+#endif
62777+
62778 perf_event_mmap(vma);
62779 }
62780 }
62781 }
62782 vma_unlock_anon_vma(vma);
62783+ if (lockprev)
62784+ vma_unlock_anon_vma(prev);
62785 khugepaged_enter_vma_merge(vma);
62786 return error;
62787 }
62788@@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62789 do {
62790 long nrpages = vma_pages(vma);
62791
62792+#ifdef CONFIG_PAX_SEGMEXEC
62793+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62794+ vma = remove_vma(vma);
62795+ continue;
62796+ }
62797+#endif
62798+
62799 mm->total_vm -= nrpages;
62800 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62801 vma = remove_vma(vma);
62802@@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62803 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62804 vma->vm_prev = NULL;
62805 do {
62806+
62807+#ifdef CONFIG_PAX_SEGMEXEC
62808+ if (vma->vm_mirror) {
62809+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62810+ vma->vm_mirror->vm_mirror = NULL;
62811+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
62812+ vma->vm_mirror = NULL;
62813+ }
62814+#endif
62815+
62816 rb_erase(&vma->vm_rb, &mm->mm_rb);
62817 mm->map_count--;
62818 tail_vma = vma;
62819@@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62820 struct vm_area_struct *new;
62821 int err = -ENOMEM;
62822
62823+#ifdef CONFIG_PAX_SEGMEXEC
62824+ struct vm_area_struct *vma_m, *new_m = NULL;
62825+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62826+#endif
62827+
62828 if (is_vm_hugetlb_page(vma) && (addr &
62829 ~(huge_page_mask(hstate_vma(vma)))))
62830 return -EINVAL;
62831
62832+#ifdef CONFIG_PAX_SEGMEXEC
62833+ vma_m = pax_find_mirror_vma(vma);
62834+#endif
62835+
62836 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62837 if (!new)
62838 goto out_err;
62839
62840+#ifdef CONFIG_PAX_SEGMEXEC
62841+ if (vma_m) {
62842+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62843+ if (!new_m) {
62844+ kmem_cache_free(vm_area_cachep, new);
62845+ goto out_err;
62846+ }
62847+ }
62848+#endif
62849+
62850 /* most fields are the same, copy all, and then fixup */
62851 *new = *vma;
62852
62853@@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62854 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62855 }
62856
62857+#ifdef CONFIG_PAX_SEGMEXEC
62858+ if (vma_m) {
62859+ *new_m = *vma_m;
62860+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
62861+ new_m->vm_mirror = new;
62862+ new->vm_mirror = new_m;
62863+
62864+ if (new_below)
62865+ new_m->vm_end = addr_m;
62866+ else {
62867+ new_m->vm_start = addr_m;
62868+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62869+ }
62870+ }
62871+#endif
62872+
62873 pol = mpol_dup(vma_policy(vma));
62874 if (IS_ERR(pol)) {
62875 err = PTR_ERR(pol);
62876@@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62877 else
62878 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62879
62880+#ifdef CONFIG_PAX_SEGMEXEC
62881+ if (!err && vma_m) {
62882+ if (anon_vma_clone(new_m, vma_m))
62883+ goto out_free_mpol;
62884+
62885+ mpol_get(pol);
62886+ vma_set_policy(new_m, pol);
62887+
62888+ if (new_m->vm_file) {
62889+ get_file(new_m->vm_file);
62890+ if (vma_m->vm_flags & VM_EXECUTABLE)
62891+ added_exe_file_vma(mm);
62892+ }
62893+
62894+ if (new_m->vm_ops && new_m->vm_ops->open)
62895+ new_m->vm_ops->open(new_m);
62896+
62897+ if (new_below)
62898+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62899+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62900+ else
62901+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62902+
62903+ if (err) {
62904+ if (new_m->vm_ops && new_m->vm_ops->close)
62905+ new_m->vm_ops->close(new_m);
62906+ if (new_m->vm_file) {
62907+ if (vma_m->vm_flags & VM_EXECUTABLE)
62908+ removed_exe_file_vma(mm);
62909+ fput(new_m->vm_file);
62910+ }
62911+ mpol_put(pol);
62912+ }
62913+ }
62914+#endif
62915+
62916 /* Success. */
62917 if (!err)
62918 return 0;
62919@@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62920 removed_exe_file_vma(mm);
62921 fput(new->vm_file);
62922 }
62923- unlink_anon_vmas(new);
62924 out_free_mpol:
62925 mpol_put(pol);
62926 out_free_vma:
62927+
62928+#ifdef CONFIG_PAX_SEGMEXEC
62929+ if (new_m) {
62930+ unlink_anon_vmas(new_m);
62931+ kmem_cache_free(vm_area_cachep, new_m);
62932+ }
62933+#endif
62934+
62935+ unlink_anon_vmas(new);
62936 kmem_cache_free(vm_area_cachep, new);
62937 out_err:
62938 return err;
62939@@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62940 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62941 unsigned long addr, int new_below)
62942 {
62943+
62944+#ifdef CONFIG_PAX_SEGMEXEC
62945+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62946+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62947+ if (mm->map_count >= sysctl_max_map_count-1)
62948+ return -ENOMEM;
62949+ } else
62950+#endif
62951+
62952 if (mm->map_count >= sysctl_max_map_count)
62953 return -ENOMEM;
62954
62955@@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62956 * work. This now handles partial unmappings.
62957 * Jeremy Fitzhardinge <jeremy@goop.org>
62958 */
62959+#ifdef CONFIG_PAX_SEGMEXEC
62960 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62961 {
62962+ int ret = __do_munmap(mm, start, len);
62963+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62964+ return ret;
62965+
62966+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62967+}
62968+
62969+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62970+#else
62971+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62972+#endif
62973+{
62974 unsigned long end;
62975 struct vm_area_struct *vma, *prev, *last;
62976
62977+ /*
62978+ * mm->mmap_sem is required to protect against another thread
62979+ * changing the mappings in case we sleep.
62980+ */
62981+ verify_mm_writelocked(mm);
62982+
62983 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62984 return -EINVAL;
62985
62986@@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
62987 /* Fix up all other VM information */
62988 remove_vma_list(mm, vma);
62989
62990+ track_exec_limit(mm, start, end, 0UL);
62991+
62992 return 0;
62993 }
62994
62995@@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62996
62997 profile_munmap(addr);
62998
62999+#ifdef CONFIG_PAX_SEGMEXEC
63000+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
63001+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
63002+ return -EINVAL;
63003+#endif
63004+
63005 down_write(&mm->mmap_sem);
63006 ret = do_munmap(mm, addr, len);
63007 up_write(&mm->mmap_sem);
63008 return ret;
63009 }
63010
63011-static inline void verify_mm_writelocked(struct mm_struct *mm)
63012-{
63013-#ifdef CONFIG_DEBUG_VM
63014- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
63015- WARN_ON(1);
63016- up_read(&mm->mmap_sem);
63017- }
63018-#endif
63019-}
63020-
63021 /*
63022 * this is really a simplified "do_mmap". it only handles
63023 * anonymous maps. eventually we may be able to do some
63024@@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
63025 struct rb_node ** rb_link, * rb_parent;
63026 pgoff_t pgoff = addr >> PAGE_SHIFT;
63027 int error;
63028+ unsigned long charged;
63029
63030 len = PAGE_ALIGN(len);
63031 if (!len)
63032@@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
63033
63034 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
63035
63036+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
63037+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
63038+ flags &= ~VM_EXEC;
63039+
63040+#ifdef CONFIG_PAX_MPROTECT
63041+ if (mm->pax_flags & MF_PAX_MPROTECT)
63042+ flags &= ~VM_MAYEXEC;
63043+#endif
63044+
63045+ }
63046+#endif
63047+
63048 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
63049 if (error & ~PAGE_MASK)
63050 return error;
63051
63052+ charged = len >> PAGE_SHIFT;
63053+
63054 /*
63055 * mlock MCL_FUTURE?
63056 */
63057 if (mm->def_flags & VM_LOCKED) {
63058 unsigned long locked, lock_limit;
63059- locked = len >> PAGE_SHIFT;
63060+ locked = charged;
63061 locked += mm->locked_vm;
63062 lock_limit = rlimit(RLIMIT_MEMLOCK);
63063 lock_limit >>= PAGE_SHIFT;
63064@@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
63065 /*
63066 * Clear old maps. this also does some error checking for us
63067 */
63068- munmap_back:
63069 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63070 if (vma && vma->vm_start < addr + len) {
63071 if (do_munmap(mm, addr, len))
63072 return -ENOMEM;
63073- goto munmap_back;
63074+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63075+ BUG_ON(vma && vma->vm_start < addr + len);
63076 }
63077
63078 /* Check against address space limits *after* clearing old maps... */
63079- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
63080+ if (!may_expand_vm(mm, charged))
63081 return -ENOMEM;
63082
63083 if (mm->map_count > sysctl_max_map_count)
63084 return -ENOMEM;
63085
63086- if (security_vm_enough_memory(len >> PAGE_SHIFT))
63087+ if (security_vm_enough_memory(charged))
63088 return -ENOMEM;
63089
63090 /* Can we just expand an old private anonymous mapping? */
63091@@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
63092 */
63093 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63094 if (!vma) {
63095- vm_unacct_memory(len >> PAGE_SHIFT);
63096+ vm_unacct_memory(charged);
63097 return -ENOMEM;
63098 }
63099
63100@@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
63101 vma_link(mm, vma, prev, rb_link, rb_parent);
63102 out:
63103 perf_event_mmap(vma);
63104- mm->total_vm += len >> PAGE_SHIFT;
63105+ mm->total_vm += charged;
63106 if (flags & VM_LOCKED) {
63107 if (!mlock_vma_pages_range(vma, addr, addr + len))
63108- mm->locked_vm += (len >> PAGE_SHIFT);
63109+ mm->locked_vm += charged;
63110 }
63111+ track_exec_limit(mm, addr, addr + len, flags);
63112 return addr;
63113 }
63114
63115@@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
63116 * Walk the list again, actually closing and freeing it,
63117 * with preemption enabled, without holding any MM locks.
63118 */
63119- while (vma)
63120+ while (vma) {
63121+ vma->vm_mirror = NULL;
63122 vma = remove_vma(vma);
63123+ }
63124
63125 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
63126 }
63127@@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
63128 struct vm_area_struct * __vma, * prev;
63129 struct rb_node ** rb_link, * rb_parent;
63130
63131+#ifdef CONFIG_PAX_SEGMEXEC
63132+ struct vm_area_struct *vma_m = NULL;
63133+#endif
63134+
63135+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
63136+ return -EPERM;
63137+
63138 /*
63139 * The vm_pgoff of a purely anonymous vma should be irrelevant
63140 * until its first write fault, when page's anon_vma and index
63141@@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
63142 if ((vma->vm_flags & VM_ACCOUNT) &&
63143 security_vm_enough_memory_mm(mm, vma_pages(vma)))
63144 return -ENOMEM;
63145+
63146+#ifdef CONFIG_PAX_SEGMEXEC
63147+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
63148+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63149+ if (!vma_m)
63150+ return -ENOMEM;
63151+ }
63152+#endif
63153+
63154 vma_link(mm, vma, prev, rb_link, rb_parent);
63155+
63156+#ifdef CONFIG_PAX_SEGMEXEC
63157+ if (vma_m)
63158+ BUG_ON(pax_mirror_vma(vma_m, vma));
63159+#endif
63160+
63161 return 0;
63162 }
63163
63164@@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63165 struct rb_node **rb_link, *rb_parent;
63166 struct mempolicy *pol;
63167
63168+ BUG_ON(vma->vm_mirror);
63169+
63170 /*
63171 * If anonymous vma has not yet been faulted, update new pgoff
63172 * to match new location, to increase its chance of merging.
63173@@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63174 return NULL;
63175 }
63176
63177+#ifdef CONFIG_PAX_SEGMEXEC
63178+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63179+{
63180+ struct vm_area_struct *prev_m;
63181+ struct rb_node **rb_link_m, *rb_parent_m;
63182+ struct mempolicy *pol_m;
63183+
63184+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63185+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63186+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63187+ *vma_m = *vma;
63188+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63189+ if (anon_vma_clone(vma_m, vma))
63190+ return -ENOMEM;
63191+ pol_m = vma_policy(vma_m);
63192+ mpol_get(pol_m);
63193+ vma_set_policy(vma_m, pol_m);
63194+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63195+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63196+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63197+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63198+ if (vma_m->vm_file)
63199+ get_file(vma_m->vm_file);
63200+ if (vma_m->vm_ops && vma_m->vm_ops->open)
63201+ vma_m->vm_ops->open(vma_m);
63202+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63203+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63204+ vma_m->vm_mirror = vma;
63205+ vma->vm_mirror = vma_m;
63206+ return 0;
63207+}
63208+#endif
63209+
63210 /*
63211 * Return true if the calling process may expand its vm space by the passed
63212 * number of pages
63213@@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63214 unsigned long lim;
63215
63216 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63217-
63218+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63219 if (cur + npages > lim)
63220 return 0;
63221 return 1;
63222@@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63223 vma->vm_start = addr;
63224 vma->vm_end = addr + len;
63225
63226+#ifdef CONFIG_PAX_MPROTECT
63227+ if (mm->pax_flags & MF_PAX_MPROTECT) {
63228+#ifndef CONFIG_PAX_MPROTECT_COMPAT
63229+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63230+ return -EPERM;
63231+ if (!(vm_flags & VM_EXEC))
63232+ vm_flags &= ~VM_MAYEXEC;
63233+#else
63234+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63235+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63236+#endif
63237+ else
63238+ vm_flags &= ~VM_MAYWRITE;
63239+ }
63240+#endif
63241+
63242 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63243 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63244
63245diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63246--- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63247+++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63248@@ -23,10 +23,16 @@
63249 #include <linux/mmu_notifier.h>
63250 #include <linux/migrate.h>
63251 #include <linux/perf_event.h>
63252+
63253+#ifdef CONFIG_PAX_MPROTECT
63254+#include <linux/elf.h>
63255+#endif
63256+
63257 #include <asm/uaccess.h>
63258 #include <asm/pgtable.h>
63259 #include <asm/cacheflush.h>
63260 #include <asm/tlbflush.h>
63261+#include <asm/mmu_context.h>
63262
63263 #ifndef pgprot_modify
63264 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63265@@ -141,6 +147,48 @@ static void change_protection(struct vm_
63266 flush_tlb_range(vma, start, end);
63267 }
63268
63269+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63270+/* called while holding the mmap semaphor for writing except stack expansion */
63271+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63272+{
63273+ unsigned long oldlimit, newlimit = 0UL;
63274+
63275+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63276+ return;
63277+
63278+ spin_lock(&mm->page_table_lock);
63279+ oldlimit = mm->context.user_cs_limit;
63280+ if ((prot & VM_EXEC) && oldlimit < end)
63281+ /* USER_CS limit moved up */
63282+ newlimit = end;
63283+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63284+ /* USER_CS limit moved down */
63285+ newlimit = start;
63286+
63287+ if (newlimit) {
63288+ mm->context.user_cs_limit = newlimit;
63289+
63290+#ifdef CONFIG_SMP
63291+ wmb();
63292+ cpus_clear(mm->context.cpu_user_cs_mask);
63293+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63294+#endif
63295+
63296+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63297+ }
63298+ spin_unlock(&mm->page_table_lock);
63299+ if (newlimit == end) {
63300+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
63301+
63302+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
63303+ if (is_vm_hugetlb_page(vma))
63304+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63305+ else
63306+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63307+ }
63308+}
63309+#endif
63310+
63311 int
63312 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63313 unsigned long start, unsigned long end, unsigned long newflags)
63314@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63315 int error;
63316 int dirty_accountable = 0;
63317
63318+#ifdef CONFIG_PAX_SEGMEXEC
63319+ struct vm_area_struct *vma_m = NULL;
63320+ unsigned long start_m, end_m;
63321+
63322+ start_m = start + SEGMEXEC_TASK_SIZE;
63323+ end_m = end + SEGMEXEC_TASK_SIZE;
63324+#endif
63325+
63326 if (newflags == oldflags) {
63327 *pprev = vma;
63328 return 0;
63329 }
63330
63331+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63332+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63333+
63334+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63335+ return -ENOMEM;
63336+
63337+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63338+ return -ENOMEM;
63339+ }
63340+
63341 /*
63342 * If we make a private mapping writable we increase our commit;
63343 * but (without finer accounting) cannot reduce our commit if we
63344@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63345 }
63346 }
63347
63348+#ifdef CONFIG_PAX_SEGMEXEC
63349+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63350+ if (start != vma->vm_start) {
63351+ error = split_vma(mm, vma, start, 1);
63352+ if (error)
63353+ goto fail;
63354+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63355+ *pprev = (*pprev)->vm_next;
63356+ }
63357+
63358+ if (end != vma->vm_end) {
63359+ error = split_vma(mm, vma, end, 0);
63360+ if (error)
63361+ goto fail;
63362+ }
63363+
63364+ if (pax_find_mirror_vma(vma)) {
63365+ error = __do_munmap(mm, start_m, end_m - start_m);
63366+ if (error)
63367+ goto fail;
63368+ } else {
63369+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63370+ if (!vma_m) {
63371+ error = -ENOMEM;
63372+ goto fail;
63373+ }
63374+ vma->vm_flags = newflags;
63375+ error = pax_mirror_vma(vma_m, vma);
63376+ if (error) {
63377+ vma->vm_flags = oldflags;
63378+ goto fail;
63379+ }
63380+ }
63381+ }
63382+#endif
63383+
63384 /*
63385 * First try to merge with previous and/or next vma.
63386 */
63387@@ -204,9 +306,21 @@ success:
63388 * vm_flags and vm_page_prot are protected by the mmap_sem
63389 * held in write mode.
63390 */
63391+
63392+#ifdef CONFIG_PAX_SEGMEXEC
63393+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63394+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63395+#endif
63396+
63397 vma->vm_flags = newflags;
63398+
63399+#ifdef CONFIG_PAX_MPROTECT
63400+ if (mm->binfmt && mm->binfmt->handle_mprotect)
63401+ mm->binfmt->handle_mprotect(vma, newflags);
63402+#endif
63403+
63404 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63405- vm_get_page_prot(newflags));
63406+ vm_get_page_prot(vma->vm_flags));
63407
63408 if (vma_wants_writenotify(vma)) {
63409 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63410@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63411 end = start + len;
63412 if (end <= start)
63413 return -ENOMEM;
63414+
63415+#ifdef CONFIG_PAX_SEGMEXEC
63416+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63417+ if (end > SEGMEXEC_TASK_SIZE)
63418+ return -EINVAL;
63419+ } else
63420+#endif
63421+
63422+ if (end > TASK_SIZE)
63423+ return -EINVAL;
63424+
63425 if (!arch_validate_prot(prot))
63426 return -EINVAL;
63427
63428@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63429 /*
63430 * Does the application expect PROT_READ to imply PROT_EXEC:
63431 */
63432- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63433+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63434 prot |= PROT_EXEC;
63435
63436 vm_flags = calc_vm_prot_bits(prot);
63437@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63438 if (start > vma->vm_start)
63439 prev = vma;
63440
63441+#ifdef CONFIG_PAX_MPROTECT
63442+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63443+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
63444+#endif
63445+
63446 for (nstart = start ; ; ) {
63447 unsigned long newflags;
63448
63449@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63450
63451 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63452 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63453+ if (prot & (PROT_WRITE | PROT_EXEC))
63454+ gr_log_rwxmprotect(vma->vm_file);
63455+
63456+ error = -EACCES;
63457+ goto out;
63458+ }
63459+
63460+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63461 error = -EACCES;
63462 goto out;
63463 }
63464@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63465 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63466 if (error)
63467 goto out;
63468+
63469+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
63470+
63471 nstart = tmp;
63472
63473 if (nstart < prev->vm_end)
63474diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63475--- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63476+++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63477@@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63478 continue;
63479 pte = ptep_clear_flush(vma, old_addr, old_pte);
63480 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63481+
63482+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63483+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63484+ pte = pte_exprotect(pte);
63485+#endif
63486+
63487 set_pte_at(mm, new_addr, new_pte, pte);
63488 }
63489
63490@@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63491 if (is_vm_hugetlb_page(vma))
63492 goto Einval;
63493
63494+#ifdef CONFIG_PAX_SEGMEXEC
63495+ if (pax_find_mirror_vma(vma))
63496+ goto Einval;
63497+#endif
63498+
63499 /* We can't remap across vm area boundaries */
63500 if (old_len > vma->vm_end - addr)
63501 goto Efault;
63502@@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63503 unsigned long ret = -EINVAL;
63504 unsigned long charged = 0;
63505 unsigned long map_flags;
63506+ unsigned long pax_task_size = TASK_SIZE;
63507
63508 if (new_addr & ~PAGE_MASK)
63509 goto out;
63510
63511- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63512+#ifdef CONFIG_PAX_SEGMEXEC
63513+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63514+ pax_task_size = SEGMEXEC_TASK_SIZE;
63515+#endif
63516+
63517+ pax_task_size -= PAGE_SIZE;
63518+
63519+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63520 goto out;
63521
63522 /* Check if the location we're moving into overlaps the
63523 * old location at all, and fail if it does.
63524 */
63525- if ((new_addr <= addr) && (new_addr+new_len) > addr)
63526- goto out;
63527-
63528- if ((addr <= new_addr) && (addr+old_len) > new_addr)
63529+ if (addr + old_len > new_addr && new_addr + new_len > addr)
63530 goto out;
63531
63532 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63533@@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63534 struct vm_area_struct *vma;
63535 unsigned long ret = -EINVAL;
63536 unsigned long charged = 0;
63537+ unsigned long pax_task_size = TASK_SIZE;
63538
63539 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63540 goto out;
63541@@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63542 if (!new_len)
63543 goto out;
63544
63545+#ifdef CONFIG_PAX_SEGMEXEC
63546+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63547+ pax_task_size = SEGMEXEC_TASK_SIZE;
63548+#endif
63549+
63550+ pax_task_size -= PAGE_SIZE;
63551+
63552+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63553+ old_len > pax_task_size || addr > pax_task_size-old_len)
63554+ goto out;
63555+
63556 if (flags & MREMAP_FIXED) {
63557 if (flags & MREMAP_MAYMOVE)
63558 ret = mremap_to(addr, old_len, new_addr, new_len);
63559@@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63560 addr + new_len);
63561 }
63562 ret = addr;
63563+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63564 goto out;
63565 }
63566 }
63567@@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63568 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63569 if (ret)
63570 goto out;
63571+
63572+ map_flags = vma->vm_flags;
63573 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63574+ if (!(ret & ~PAGE_MASK)) {
63575+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63576+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63577+ }
63578 }
63579 out:
63580 if (ret & ~PAGE_MASK)
63581diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63582--- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63583+++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63584@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63585 unsigned long __init free_all_memory_core_early(int nodeid)
63586 {
63587 int i;
63588- u64 start, end;
63589+ u64 start, end, startrange, endrange;
63590 unsigned long count = 0;
63591- struct range *range = NULL;
63592+ struct range *range = NULL, rangerange = { 0, 0 };
63593 int nr_range;
63594
63595 nr_range = get_free_all_memory_range(&range, nodeid);
63596+ startrange = __pa(range) >> PAGE_SHIFT;
63597+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63598
63599 for (i = 0; i < nr_range; i++) {
63600 start = range[i].start;
63601 end = range[i].end;
63602+ if (start <= endrange && startrange < end) {
63603+ BUG_ON(rangerange.start | rangerange.end);
63604+ rangerange = range[i];
63605+ continue;
63606+ }
63607 count += end - start;
63608 __free_pages_memory(start, end);
63609 }
63610+ start = rangerange.start;
63611+ end = rangerange.end;
63612+ count += end - start;
63613+ __free_pages_memory(start, end);
63614
63615 return count;
63616 }
63617diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63618--- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63619+++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63620@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63621 int sysctl_overcommit_ratio = 50; /* default is 50% */
63622 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63623 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63624-int heap_stack_gap = 0;
63625
63626 atomic_long_t mmap_pages_allocated;
63627
63628@@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63629 EXPORT_SYMBOL(find_vma);
63630
63631 /*
63632- * find a VMA
63633- * - we don't extend stack VMAs under NOMMU conditions
63634- */
63635-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63636-{
63637- return find_vma(mm, addr);
63638-}
63639-
63640-/*
63641 * expand a stack to a given address
63642 * - not supported under NOMMU conditions
63643 */
63644@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63645
63646 /* most fields are the same, copy all, and then fixup */
63647 *new = *vma;
63648+ INIT_LIST_HEAD(&new->anon_vma_chain);
63649 *region = *vma->vm_region;
63650 new->vm_region = region;
63651
63652diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63653--- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63654+++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63655@@ -337,7 +337,7 @@ out:
63656 * This usage means that zero-order pages may not be compound.
63657 */
63658
63659-static void free_compound_page(struct page *page)
63660+void free_compound_page(struct page *page)
63661 {
63662 __free_pages_ok(page, compound_order(page));
63663 }
63664@@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63665 int i;
63666 int bad = 0;
63667
63668+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63669+ unsigned long index = 1UL << order;
63670+#endif
63671+
63672 trace_mm_page_free_direct(page, order);
63673 kmemcheck_free_shadow(page, order);
63674
63675@@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63676 debug_check_no_obj_freed(page_address(page),
63677 PAGE_SIZE << order);
63678 }
63679+
63680+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63681+ for (; index; --index)
63682+ sanitize_highpage(page + index - 1);
63683+#endif
63684+
63685 arch_free_page(page, order);
63686 kernel_map_pages(page, 1 << order, 0);
63687
63688@@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63689 arch_alloc_page(page, order);
63690 kernel_map_pages(page, 1 << order, 1);
63691
63692+#ifndef CONFIG_PAX_MEMORY_SANITIZE
63693 if (gfp_flags & __GFP_ZERO)
63694 prep_zero_page(page, order, gfp_flags);
63695+#endif
63696
63697 if (order && (gfp_flags & __GFP_COMP))
63698 prep_compound_page(page, order);
63699@@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63700 int cpu;
63701 struct zone *zone;
63702
63703+ pax_track_stack();
63704+
63705 for_each_populated_zone(zone) {
63706 if (skip_free_areas_zone(filter, zone))
63707 continue;
63708diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63709--- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63710+++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63711@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63712 static unsigned int pcpu_last_unit_cpu __read_mostly;
63713
63714 /* the address of the first chunk which starts with the kernel static area */
63715-void *pcpu_base_addr __read_mostly;
63716+void *pcpu_base_addr __read_only;
63717 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63718
63719 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63720diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63721--- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63722+++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63723@@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63724 struct anon_vma *anon_vma = vma->anon_vma;
63725 struct anon_vma_chain *avc;
63726
63727+#ifdef CONFIG_PAX_SEGMEXEC
63728+ struct anon_vma_chain *avc_m = NULL;
63729+#endif
63730+
63731 might_sleep();
63732 if (unlikely(!anon_vma)) {
63733 struct mm_struct *mm = vma->vm_mm;
63734@@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63735 if (!avc)
63736 goto out_enomem;
63737
63738+#ifdef CONFIG_PAX_SEGMEXEC
63739+ avc_m = anon_vma_chain_alloc();
63740+ if (!avc_m)
63741+ goto out_enomem_free_avc;
63742+#endif
63743+
63744 anon_vma = find_mergeable_anon_vma(vma);
63745 allocated = NULL;
63746 if (!anon_vma) {
63747@@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63748 /* page_table_lock to protect against threads */
63749 spin_lock(&mm->page_table_lock);
63750 if (likely(!vma->anon_vma)) {
63751+
63752+#ifdef CONFIG_PAX_SEGMEXEC
63753+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63754+
63755+ if (vma_m) {
63756+ BUG_ON(vma_m->anon_vma);
63757+ vma_m->anon_vma = anon_vma;
63758+ avc_m->anon_vma = anon_vma;
63759+ avc_m->vma = vma;
63760+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63761+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
63762+ avc_m = NULL;
63763+ }
63764+#endif
63765+
63766 vma->anon_vma = anon_vma;
63767 avc->anon_vma = anon_vma;
63768 avc->vma = vma;
63769@@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63770
63771 if (unlikely(allocated))
63772 put_anon_vma(allocated);
63773+
63774+#ifdef CONFIG_PAX_SEGMEXEC
63775+ if (unlikely(avc_m))
63776+ anon_vma_chain_free(avc_m);
63777+#endif
63778+
63779 if (unlikely(avc))
63780 anon_vma_chain_free(avc);
63781 }
63782 return 0;
63783
63784 out_enomem_free_avc:
63785+
63786+#ifdef CONFIG_PAX_SEGMEXEC
63787+ if (avc_m)
63788+ anon_vma_chain_free(avc_m);
63789+#endif
63790+
63791 anon_vma_chain_free(avc);
63792 out_enomem:
63793 return -ENOMEM;
63794@@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63795 * Attach the anon_vmas from src to dst.
63796 * Returns 0 on success, -ENOMEM on failure.
63797 */
63798-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63799+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63800 {
63801 struct anon_vma_chain *avc, *pavc;
63802
63803@@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63804 * the corresponding VMA in the parent process is attached to.
63805 * Returns 0 on success, non-zero on failure.
63806 */
63807-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63808+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63809 {
63810 struct anon_vma_chain *avc;
63811 struct anon_vma *anon_vma;
63812diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63813--- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63814+++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63815@@ -31,7 +31,7 @@
63816 #include <linux/percpu_counter.h>
63817 #include <linux/swap.h>
63818
63819-static struct vfsmount *shm_mnt;
63820+struct vfsmount *shm_mnt;
63821
63822 #ifdef CONFIG_SHMEM
63823 /*
63824@@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63825 goto unlock;
63826 }
63827 entry = shmem_swp_entry(info, index, NULL);
63828+ if (!entry)
63829+ goto unlock;
63830 if (entry->val) {
63831 /*
63832 * The more uptodate page coming down from a stacked
63833@@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63834 struct vm_area_struct pvma;
63835 struct page *page;
63836
63837+ pax_track_stack();
63838+
63839 spol = mpol_cond_copy(&mpol,
63840 mpol_shared_policy_lookup(&info->policy, idx));
63841
63842@@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63843
63844 info = SHMEM_I(inode);
63845 inode->i_size = len-1;
63846- if (len <= (char *)inode - (char *)info) {
63847+ if (len <= (char *)inode - (char *)info && len <= 64) {
63848 /* do it inline */
63849 memcpy(info, symname, len);
63850 inode->i_op = &shmem_symlink_inline_operations;
63851@@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63852 int err = -ENOMEM;
63853
63854 /* Round up to L1_CACHE_BYTES to resist false sharing */
63855- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63856- L1_CACHE_BYTES), GFP_KERNEL);
63857+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63858 if (!sbinfo)
63859 return -ENOMEM;
63860
63861diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63862--- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63863+++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63864@@ -150,7 +150,7 @@
63865
63866 /* Legal flag mask for kmem_cache_create(). */
63867 #if DEBUG
63868-# define CREATE_MASK (SLAB_RED_ZONE | \
63869+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63870 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63871 SLAB_CACHE_DMA | \
63872 SLAB_STORE_USER | \
63873@@ -158,7 +158,7 @@
63874 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63875 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63876 #else
63877-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63878+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63879 SLAB_CACHE_DMA | \
63880 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63881 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63882@@ -287,7 +287,7 @@ struct kmem_list3 {
63883 * Need this for bootstrapping a per node allocator.
63884 */
63885 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63886-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63887+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63888 #define CACHE_CACHE 0
63889 #define SIZE_AC MAX_NUMNODES
63890 #define SIZE_L3 (2 * MAX_NUMNODES)
63891@@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63892 if ((x)->max_freeable < i) \
63893 (x)->max_freeable = i; \
63894 } while (0)
63895-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63896-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63897-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63898-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63899+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63900+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63901+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63902+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63903 #else
63904 #define STATS_INC_ACTIVE(x) do { } while (0)
63905 #define STATS_DEC_ACTIVE(x) do { } while (0)
63906@@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63907 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63908 */
63909 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63910- const struct slab *slab, void *obj)
63911+ const struct slab *slab, const void *obj)
63912 {
63913 u32 offset = (obj - slab->s_mem);
63914 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63915@@ -563,7 +563,7 @@ struct cache_names {
63916 static struct cache_names __initdata cache_names[] = {
63917 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63918 #include <linux/kmalloc_sizes.h>
63919- {NULL,}
63920+ {NULL}
63921 #undef CACHE
63922 };
63923
63924@@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63925 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63926 sizes[INDEX_AC].cs_size,
63927 ARCH_KMALLOC_MINALIGN,
63928- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63929+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63930 NULL);
63931
63932 if (INDEX_AC != INDEX_L3) {
63933@@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63934 kmem_cache_create(names[INDEX_L3].name,
63935 sizes[INDEX_L3].cs_size,
63936 ARCH_KMALLOC_MINALIGN,
63937- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63938+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63939 NULL);
63940 }
63941
63942@@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63943 sizes->cs_cachep = kmem_cache_create(names->name,
63944 sizes->cs_size,
63945 ARCH_KMALLOC_MINALIGN,
63946- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63947+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63948 NULL);
63949 }
63950 #ifdef CONFIG_ZONE_DMA
63951@@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63952 }
63953 /* cpu stats */
63954 {
63955- unsigned long allochit = atomic_read(&cachep->allochit);
63956- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63957- unsigned long freehit = atomic_read(&cachep->freehit);
63958- unsigned long freemiss = atomic_read(&cachep->freemiss);
63959+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63960+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63961+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63962+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63963
63964 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63965 allochit, allocmiss, freehit, freemiss);
63966@@ -4530,15 +4530,66 @@ static const struct file_operations proc
63967
63968 static int __init slab_proc_init(void)
63969 {
63970- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63971+ mode_t gr_mode = S_IRUGO;
63972+
63973+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63974+ gr_mode = S_IRUSR;
63975+#endif
63976+
63977+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63978 #ifdef CONFIG_DEBUG_SLAB_LEAK
63979- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63980+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63981 #endif
63982 return 0;
63983 }
63984 module_init(slab_proc_init);
63985 #endif
63986
63987+void check_object_size(const void *ptr, unsigned long n, bool to)
63988+{
63989+
63990+#ifdef CONFIG_PAX_USERCOPY
63991+ struct page *page;
63992+ struct kmem_cache *cachep = NULL;
63993+ struct slab *slabp;
63994+ unsigned int objnr;
63995+ unsigned long offset;
63996+
63997+ if (!n)
63998+ return;
63999+
64000+ if (ZERO_OR_NULL_PTR(ptr))
64001+ goto report;
64002+
64003+ if (!virt_addr_valid(ptr))
64004+ return;
64005+
64006+ page = virt_to_head_page(ptr);
64007+
64008+ if (!PageSlab(page)) {
64009+ if (object_is_on_stack(ptr, n) == -1)
64010+ goto report;
64011+ return;
64012+ }
64013+
64014+ cachep = page_get_cache(page);
64015+ if (!(cachep->flags & SLAB_USERCOPY))
64016+ goto report;
64017+
64018+ slabp = page_get_slab(page);
64019+ objnr = obj_to_index(cachep, slabp, ptr);
64020+ BUG_ON(objnr >= cachep->num);
64021+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
64022+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
64023+ return;
64024+
64025+report:
64026+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
64027+#endif
64028+
64029+}
64030+EXPORT_SYMBOL(check_object_size);
64031+
64032 /**
64033 * ksize - get the actual amount of memory allocated for a given object
64034 * @objp: Pointer to the object
64035diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
64036--- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
64037+++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
64038@@ -29,7 +29,7 @@
64039 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
64040 * alloc_pages() directly, allocating compound pages so the page order
64041 * does not have to be separately tracked, and also stores the exact
64042- * allocation size in page->private so that it can be used to accurately
64043+ * allocation size in slob_page->size so that it can be used to accurately
64044 * provide ksize(). These objects are detected in kfree() because slob_page()
64045 * is false for them.
64046 *
64047@@ -58,6 +58,7 @@
64048 */
64049
64050 #include <linux/kernel.h>
64051+#include <linux/sched.h>
64052 #include <linux/slab.h>
64053 #include <linux/mm.h>
64054 #include <linux/swap.h> /* struct reclaim_state */
64055@@ -102,7 +103,8 @@ struct slob_page {
64056 unsigned long flags; /* mandatory */
64057 atomic_t _count; /* mandatory */
64058 slobidx_t units; /* free units left in page */
64059- unsigned long pad[2];
64060+ unsigned long pad[1];
64061+ unsigned long size; /* size when >=PAGE_SIZE */
64062 slob_t *free; /* first free slob_t in page */
64063 struct list_head list; /* linked list of free pages */
64064 };
64065@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
64066 */
64067 static inline int is_slob_page(struct slob_page *sp)
64068 {
64069- return PageSlab((struct page *)sp);
64070+ return PageSlab((struct page *)sp) && !sp->size;
64071 }
64072
64073 static inline void set_slob_page(struct slob_page *sp)
64074@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
64075
64076 static inline struct slob_page *slob_page(const void *addr)
64077 {
64078- return (struct slob_page *)virt_to_page(addr);
64079+ return (struct slob_page *)virt_to_head_page(addr);
64080 }
64081
64082 /*
64083@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
64084 /*
64085 * Return the size of a slob block.
64086 */
64087-static slobidx_t slob_units(slob_t *s)
64088+static slobidx_t slob_units(const slob_t *s)
64089 {
64090 if (s->units > 0)
64091 return s->units;
64092@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
64093 /*
64094 * Return the next free slob block pointer after this one.
64095 */
64096-static slob_t *slob_next(slob_t *s)
64097+static slob_t *slob_next(const slob_t *s)
64098 {
64099 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
64100 slobidx_t next;
64101@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
64102 /*
64103 * Returns true if s is the last free block in its page.
64104 */
64105-static int slob_last(slob_t *s)
64106+static int slob_last(const slob_t *s)
64107 {
64108 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
64109 }
64110@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
64111 if (!page)
64112 return NULL;
64113
64114+ set_slob_page(page);
64115 return page_address(page);
64116 }
64117
64118@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
64119 if (!b)
64120 return NULL;
64121 sp = slob_page(b);
64122- set_slob_page(sp);
64123
64124 spin_lock_irqsave(&slob_lock, flags);
64125 sp->units = SLOB_UNITS(PAGE_SIZE);
64126 sp->free = b;
64127+ sp->size = 0;
64128 INIT_LIST_HEAD(&sp->list);
64129 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
64130 set_slob_page_free(sp, slob_list);
64131@@ -476,10 +479,9 @@ out:
64132 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
64133 */
64134
64135-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64136+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
64137 {
64138- unsigned int *m;
64139- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64140+ slob_t *m;
64141 void *ret;
64142
64143 lockdep_trace_alloc(gfp);
64144@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
64145
64146 if (!m)
64147 return NULL;
64148- *m = size;
64149+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64150+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64151+ m[0].units = size;
64152+ m[1].units = align;
64153 ret = (void *)m + align;
64154
64155 trace_kmalloc_node(_RET_IP_, ret,
64156@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64157 gfp |= __GFP_COMP;
64158 ret = slob_new_pages(gfp, order, node);
64159 if (ret) {
64160- struct page *page;
64161- page = virt_to_page(ret);
64162- page->private = size;
64163+ struct slob_page *sp;
64164+ sp = slob_page(ret);
64165+ sp->size = size;
64166 }
64167
64168 trace_kmalloc_node(_RET_IP_, ret,
64169 size, PAGE_SIZE << order, gfp, node);
64170 }
64171
64172- kmemleak_alloc(ret, size, 1, gfp);
64173+ return ret;
64174+}
64175+
64176+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64177+{
64178+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64179+ void *ret = __kmalloc_node_align(size, gfp, node, align);
64180+
64181+ if (!ZERO_OR_NULL_PTR(ret))
64182+ kmemleak_alloc(ret, size, 1, gfp);
64183 return ret;
64184 }
64185 EXPORT_SYMBOL(__kmalloc_node);
64186@@ -531,13 +545,88 @@ void kfree(const void *block)
64187 sp = slob_page(block);
64188 if (is_slob_page(sp)) {
64189 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64190- unsigned int *m = (unsigned int *)(block - align);
64191- slob_free(m, *m + align);
64192- } else
64193+ slob_t *m = (slob_t *)(block - align);
64194+ slob_free(m, m[0].units + align);
64195+ } else {
64196+ clear_slob_page(sp);
64197+ free_slob_page(sp);
64198+ sp->size = 0;
64199 put_page(&sp->page);
64200+ }
64201 }
64202 EXPORT_SYMBOL(kfree);
64203
64204+void check_object_size(const void *ptr, unsigned long n, bool to)
64205+{
64206+
64207+#ifdef CONFIG_PAX_USERCOPY
64208+ struct slob_page *sp;
64209+ const slob_t *free;
64210+ const void *base;
64211+ unsigned long flags;
64212+
64213+ if (!n)
64214+ return;
64215+
64216+ if (ZERO_OR_NULL_PTR(ptr))
64217+ goto report;
64218+
64219+ if (!virt_addr_valid(ptr))
64220+ return;
64221+
64222+ sp = slob_page(ptr);
64223+ if (!PageSlab((struct page*)sp)) {
64224+ if (object_is_on_stack(ptr, n) == -1)
64225+ goto report;
64226+ return;
64227+ }
64228+
64229+ if (sp->size) {
64230+ base = page_address(&sp->page);
64231+ if (base <= ptr && n <= sp->size - (ptr - base))
64232+ return;
64233+ goto report;
64234+ }
64235+
64236+ /* some tricky double walking to find the chunk */
64237+ spin_lock_irqsave(&slob_lock, flags);
64238+ base = (void *)((unsigned long)ptr & PAGE_MASK);
64239+ free = sp->free;
64240+
64241+ while (!slob_last(free) && (void *)free <= ptr) {
64242+ base = free + slob_units(free);
64243+ free = slob_next(free);
64244+ }
64245+
64246+ while (base < (void *)free) {
64247+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64248+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
64249+ int offset;
64250+
64251+ if (ptr < base + align)
64252+ break;
64253+
64254+ offset = ptr - base - align;
64255+ if (offset >= m) {
64256+ base += size;
64257+ continue;
64258+ }
64259+
64260+ if (n > m - offset)
64261+ break;
64262+
64263+ spin_unlock_irqrestore(&slob_lock, flags);
64264+ return;
64265+ }
64266+
64267+ spin_unlock_irqrestore(&slob_lock, flags);
64268+report:
64269+ pax_report_usercopy(ptr, n, to, NULL);
64270+#endif
64271+
64272+}
64273+EXPORT_SYMBOL(check_object_size);
64274+
64275 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64276 size_t ksize(const void *block)
64277 {
64278@@ -550,10 +639,10 @@ size_t ksize(const void *block)
64279 sp = slob_page(block);
64280 if (is_slob_page(sp)) {
64281 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64282- unsigned int *m = (unsigned int *)(block - align);
64283- return SLOB_UNITS(*m) * SLOB_UNIT;
64284+ slob_t *m = (slob_t *)(block - align);
64285+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64286 } else
64287- return sp->page.private;
64288+ return sp->size;
64289 }
64290 EXPORT_SYMBOL(ksize);
64291
64292@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64293 {
64294 struct kmem_cache *c;
64295
64296+#ifdef CONFIG_PAX_USERCOPY
64297+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
64298+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64299+#else
64300 c = slob_alloc(sizeof(struct kmem_cache),
64301 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64302+#endif
64303
64304 if (c) {
64305 c->name = name;
64306@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64307 {
64308 void *b;
64309
64310+#ifdef CONFIG_PAX_USERCOPY
64311+ b = __kmalloc_node_align(c->size, flags, node, c->align);
64312+#else
64313 if (c->size < PAGE_SIZE) {
64314 b = slob_alloc(c->size, flags, c->align, node);
64315 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64316 SLOB_UNITS(c->size) * SLOB_UNIT,
64317 flags, node);
64318 } else {
64319+ struct slob_page *sp;
64320+
64321 b = slob_new_pages(flags, get_order(c->size), node);
64322+ sp = slob_page(b);
64323+ sp->size = c->size;
64324 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64325 PAGE_SIZE << get_order(c->size),
64326 flags, node);
64327 }
64328+#endif
64329
64330 if (c->ctor)
64331 c->ctor(b);
64332@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64333
64334 static void __kmem_cache_free(void *b, int size)
64335 {
64336- if (size < PAGE_SIZE)
64337+ struct slob_page *sp = slob_page(b);
64338+
64339+ if (is_slob_page(sp))
64340 slob_free(b, size);
64341- else
64342+ else {
64343+ clear_slob_page(sp);
64344+ free_slob_page(sp);
64345+ sp->size = 0;
64346 slob_free_pages(b, get_order(size));
64347+ }
64348 }
64349
64350 static void kmem_rcu_free(struct rcu_head *head)
64351@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64352
64353 void kmem_cache_free(struct kmem_cache *c, void *b)
64354 {
64355+ int size = c->size;
64356+
64357+#ifdef CONFIG_PAX_USERCOPY
64358+ if (size + c->align < PAGE_SIZE) {
64359+ size += c->align;
64360+ b -= c->align;
64361+ }
64362+#endif
64363+
64364 kmemleak_free_recursive(b, c->flags);
64365 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64366 struct slob_rcu *slob_rcu;
64367- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64368- slob_rcu->size = c->size;
64369+ slob_rcu = b + (size - sizeof(struct slob_rcu));
64370+ slob_rcu->size = size;
64371 call_rcu(&slob_rcu->head, kmem_rcu_free);
64372 } else {
64373- __kmem_cache_free(b, c->size);
64374+ __kmem_cache_free(b, size);
64375 }
64376
64377+#ifdef CONFIG_PAX_USERCOPY
64378+ trace_kfree(_RET_IP_, b);
64379+#else
64380 trace_kmem_cache_free(_RET_IP_, b);
64381+#endif
64382+
64383 }
64384 EXPORT_SYMBOL(kmem_cache_free);
64385
64386diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64387--- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64388+++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64389@@ -431,7 +431,7 @@ static void print_track(const char *s, s
64390 if (!t->addr)
64391 return;
64392
64393- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64394+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64395 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64396 }
64397
64398@@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64399
64400 page = virt_to_head_page(x);
64401
64402+ BUG_ON(!PageSlab(page));
64403+
64404 slab_free(s, page, x, _RET_IP_);
64405
64406 trace_kmem_cache_free(_RET_IP_, x);
64407@@ -2216,7 +2218,7 @@ static int slub_min_objects;
64408 * Merge control. If this is set then no merging of slab caches will occur.
64409 * (Could be removed. This was introduced to pacify the merge skeptics.)
64410 */
64411-static int slub_nomerge;
64412+static int slub_nomerge = 1;
64413
64414 /*
64415 * Calculate the order of allocation given an slab object size.
64416@@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64417 * list to avoid pounding the page allocator excessively.
64418 */
64419 set_min_partial(s, ilog2(s->size));
64420- s->refcount = 1;
64421+ atomic_set(&s->refcount, 1);
64422 #ifdef CONFIG_NUMA
64423 s->remote_node_defrag_ratio = 1000;
64424 #endif
64425@@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64426 void kmem_cache_destroy(struct kmem_cache *s)
64427 {
64428 down_write(&slub_lock);
64429- s->refcount--;
64430- if (!s->refcount) {
64431+ if (atomic_dec_and_test(&s->refcount)) {
64432 list_del(&s->list);
64433 if (kmem_cache_close(s)) {
64434 printk(KERN_ERR "SLUB %s: %s called for cache that "
64435@@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64436 EXPORT_SYMBOL(__kmalloc_node);
64437 #endif
64438
64439+void check_object_size(const void *ptr, unsigned long n, bool to)
64440+{
64441+
64442+#ifdef CONFIG_PAX_USERCOPY
64443+ struct page *page;
64444+ struct kmem_cache *s = NULL;
64445+ unsigned long offset;
64446+
64447+ if (!n)
64448+ return;
64449+
64450+ if (ZERO_OR_NULL_PTR(ptr))
64451+ goto report;
64452+
64453+ if (!virt_addr_valid(ptr))
64454+ return;
64455+
64456+ page = virt_to_head_page(ptr);
64457+
64458+ if (!PageSlab(page)) {
64459+ if (object_is_on_stack(ptr, n) == -1)
64460+ goto report;
64461+ return;
64462+ }
64463+
64464+ s = page->slab;
64465+ if (!(s->flags & SLAB_USERCOPY))
64466+ goto report;
64467+
64468+ offset = (ptr - page_address(page)) % s->size;
64469+ if (offset <= s->objsize && n <= s->objsize - offset)
64470+ return;
64471+
64472+report:
64473+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64474+#endif
64475+
64476+}
64477+EXPORT_SYMBOL(check_object_size);
64478+
64479 size_t ksize(const void *object)
64480 {
64481 struct page *page;
64482@@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64483 int node;
64484
64485 list_add(&s->list, &slab_caches);
64486- s->refcount = -1;
64487+ atomic_set(&s->refcount, -1);
64488
64489 for_each_node_state(node, N_NORMAL_MEMORY) {
64490 struct kmem_cache_node *n = get_node(s, node);
64491@@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64492
64493 /* Caches that are not of the two-to-the-power-of size */
64494 if (KMALLOC_MIN_SIZE <= 32) {
64495- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64496+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64497 caches++;
64498 }
64499
64500 if (KMALLOC_MIN_SIZE <= 64) {
64501- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64502+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64503 caches++;
64504 }
64505
64506 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64507- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64508+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64509 caches++;
64510 }
64511
64512@@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64513 /*
64514 * We may have set a slab to be unmergeable during bootstrap.
64515 */
64516- if (s->refcount < 0)
64517+ if (atomic_read(&s->refcount) < 0)
64518 return 1;
64519
64520 return 0;
64521@@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64522 down_write(&slub_lock);
64523 s = find_mergeable(size, align, flags, name, ctor);
64524 if (s) {
64525- s->refcount++;
64526+ atomic_inc(&s->refcount);
64527 /*
64528 * Adjust the object sizes so that we clear
64529 * the complete object on kzalloc.
64530@@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64531 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64532
64533 if (sysfs_slab_alias(s, name)) {
64534- s->refcount--;
64535+ atomic_dec(&s->refcount);
64536 goto err;
64537 }
64538 up_write(&slub_lock);
64539@@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64540
64541 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64542 {
64543- return sprintf(buf, "%d\n", s->refcount - 1);
64544+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64545 }
64546 SLAB_ATTR_RO(aliases);
64547
64548@@ -4945,7 +4986,13 @@ static const struct file_operations proc
64549
64550 static int __init slab_proc_init(void)
64551 {
64552- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64553+ mode_t gr_mode = S_IRUGO;
64554+
64555+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64556+ gr_mode = S_IRUSR;
64557+#endif
64558+
64559+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64560 return 0;
64561 }
64562 module_init(slab_proc_init);
64563diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64564--- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64565+++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64566@@ -31,6 +31,7 @@
64567 #include <linux/backing-dev.h>
64568 #include <linux/memcontrol.h>
64569 #include <linux/gfp.h>
64570+#include <linux/hugetlb.h>
64571
64572 #include "internal.h"
64573
64574@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64575
64576 __page_cache_release(page);
64577 dtor = get_compound_page_dtor(page);
64578+ if (!PageHuge(page))
64579+ BUG_ON(dtor != free_compound_page);
64580 (*dtor)(page);
64581 }
64582
64583diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64584--- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64585+++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64586@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64587
64588 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64589 /* Activity counter to indicate that a swapon or swapoff has occurred */
64590-static atomic_t proc_poll_event = ATOMIC_INIT(0);
64591+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64592
64593 static inline unsigned char swap_count(unsigned char ent)
64594 {
64595@@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64596 }
64597 filp_close(swap_file, NULL);
64598 err = 0;
64599- atomic_inc(&proc_poll_event);
64600+ atomic_inc_unchecked(&proc_poll_event);
64601 wake_up_interruptible(&proc_poll_wait);
64602
64603 out_dput:
64604@@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64605
64606 poll_wait(file, &proc_poll_wait, wait);
64607
64608- if (s->event != atomic_read(&proc_poll_event)) {
64609- s->event = atomic_read(&proc_poll_event);
64610+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64611+ s->event = atomic_read_unchecked(&proc_poll_event);
64612 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64613 }
64614
64615@@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64616 }
64617
64618 s->seq.private = s;
64619- s->event = atomic_read(&proc_poll_event);
64620+ s->event = atomic_read_unchecked(&proc_poll_event);
64621 return ret;
64622 }
64623
64624@@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64625 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64626
64627 mutex_unlock(&swapon_mutex);
64628- atomic_inc(&proc_poll_event);
64629+ atomic_inc_unchecked(&proc_poll_event);
64630 wake_up_interruptible(&proc_poll_wait);
64631
64632 if (S_ISREG(inode->i_mode))
64633diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64634--- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64635+++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64636@@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64637 * allocated buffer. Use this if you don't want to free the buffer immediately
64638 * like, for example, with RCU.
64639 */
64640+#undef __krealloc
64641 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64642 {
64643 void *ret;
64644@@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64645 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64646 * %NULL pointer, the object pointed to is freed.
64647 */
64648+#undef krealloc
64649 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64650 {
64651 void *ret;
64652@@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64653 void arch_pick_mmap_layout(struct mm_struct *mm)
64654 {
64655 mm->mmap_base = TASK_UNMAPPED_BASE;
64656+
64657+#ifdef CONFIG_PAX_RANDMMAP
64658+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64659+ mm->mmap_base += mm->delta_mmap;
64660+#endif
64661+
64662 mm->get_unmapped_area = arch_get_unmapped_area;
64663 mm->unmap_area = arch_unmap_area;
64664 }
64665diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64666--- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64667+++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64668@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64669
64670 pte = pte_offset_kernel(pmd, addr);
64671 do {
64672- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64673- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64674+
64675+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64676+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64677+ BUG_ON(!pte_exec(*pte));
64678+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64679+ continue;
64680+ }
64681+#endif
64682+
64683+ {
64684+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64685+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64686+ }
64687 } while (pte++, addr += PAGE_SIZE, addr != end);
64688 }
64689
64690@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64691 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64692 {
64693 pte_t *pte;
64694+ int ret = -ENOMEM;
64695
64696 /*
64697 * nr is a running index into the array which helps higher level
64698@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64699 pte = pte_alloc_kernel(pmd, addr);
64700 if (!pte)
64701 return -ENOMEM;
64702+
64703+ pax_open_kernel();
64704 do {
64705 struct page *page = pages[*nr];
64706
64707- if (WARN_ON(!pte_none(*pte)))
64708- return -EBUSY;
64709- if (WARN_ON(!page))
64710- return -ENOMEM;
64711+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64712+ if (pgprot_val(prot) & _PAGE_NX)
64713+#endif
64714+
64715+ if (WARN_ON(!pte_none(*pte))) {
64716+ ret = -EBUSY;
64717+ goto out;
64718+ }
64719+ if (WARN_ON(!page)) {
64720+ ret = -ENOMEM;
64721+ goto out;
64722+ }
64723 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64724 (*nr)++;
64725 } while (pte++, addr += PAGE_SIZE, addr != end);
64726- return 0;
64727+ ret = 0;
64728+out:
64729+ pax_close_kernel();
64730+ return ret;
64731 }
64732
64733 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64734@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64735 * and fall back on vmalloc() if that fails. Others
64736 * just put it in the vmalloc space.
64737 */
64738-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64739+#ifdef CONFIG_MODULES
64740+#ifdef MODULES_VADDR
64741 unsigned long addr = (unsigned long)x;
64742 if (addr >= MODULES_VADDR && addr < MODULES_END)
64743 return 1;
64744 #endif
64745+
64746+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64747+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64748+ return 1;
64749+#endif
64750+
64751+#endif
64752+
64753 return is_vmalloc_addr(x);
64754 }
64755
64756@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64757
64758 if (!pgd_none(*pgd)) {
64759 pud_t *pud = pud_offset(pgd, addr);
64760+#ifdef CONFIG_X86
64761+ if (!pud_large(*pud))
64762+#endif
64763 if (!pud_none(*pud)) {
64764 pmd_t *pmd = pmd_offset(pud, addr);
64765+#ifdef CONFIG_X86
64766+ if (!pmd_large(*pmd))
64767+#endif
64768 if (!pmd_none(*pmd)) {
64769 pte_t *ptep, pte;
64770
64771@@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64772 struct vm_struct *area;
64773
64774 BUG_ON(in_interrupt());
64775+
64776+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64777+ if (flags & VM_KERNEXEC) {
64778+ if (start != VMALLOC_START || end != VMALLOC_END)
64779+ return NULL;
64780+ start = (unsigned long)MODULES_EXEC_VADDR;
64781+ end = (unsigned long)MODULES_EXEC_END;
64782+ }
64783+#endif
64784+
64785 if (flags & VM_IOREMAP) {
64786 int bit = fls(size);
64787
64788@@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64789 if (count > totalram_pages)
64790 return NULL;
64791
64792+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64793+ if (!(pgprot_val(prot) & _PAGE_NX))
64794+ flags |= VM_KERNEXEC;
64795+#endif
64796+
64797 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64798 __builtin_return_address(0));
64799 if (!area)
64800@@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64801 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64802 return NULL;
64803
64804+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64805+ if (!(pgprot_val(prot) & _PAGE_NX))
64806+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64807+ node, gfp_mask, caller);
64808+ else
64809+#endif
64810+
64811 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64812 gfp_mask, caller);
64813
64814@@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64815 gfp_mask, prot, node, caller);
64816 }
64817
64818+#undef __vmalloc
64819 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64820 {
64821 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64822@@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64823 * For tight control over page level allocator and protection flags
64824 * use __vmalloc() instead.
64825 */
64826+#undef vmalloc
64827 void *vmalloc(unsigned long size)
64828 {
64829 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64830@@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64831 * For tight control over page level allocator and protection flags
64832 * use __vmalloc() instead.
64833 */
64834+#undef vzalloc
64835 void *vzalloc(unsigned long size)
64836 {
64837 return __vmalloc_node_flags(size, -1,
64838@@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64839 * The resulting memory area is zeroed so it can be mapped to userspace
64840 * without leaking data.
64841 */
64842+#undef vmalloc_user
64843 void *vmalloc_user(unsigned long size)
64844 {
64845 struct vm_struct *area;
64846@@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64847 * For tight control over page level allocator and protection flags
64848 * use __vmalloc() instead.
64849 */
64850+#undef vmalloc_node
64851 void *vmalloc_node(unsigned long size, int node)
64852 {
64853 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64854@@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64855 * For tight control over page level allocator and protection flags
64856 * use __vmalloc_node() instead.
64857 */
64858+#undef vzalloc_node
64859 void *vzalloc_node(unsigned long size, int node)
64860 {
64861 return __vmalloc_node_flags(size, node,
64862@@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64863 * For tight control over page level allocator and protection flags
64864 * use __vmalloc() instead.
64865 */
64866-
64867+#undef vmalloc_exec
64868 void *vmalloc_exec(unsigned long size)
64869 {
64870- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64871+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64872 -1, __builtin_return_address(0));
64873 }
64874
64875@@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64876 * Allocate enough 32bit PA addressable pages to cover @size from the
64877 * page level allocator and map them into contiguous kernel virtual space.
64878 */
64879+#undef vmalloc_32
64880 void *vmalloc_32(unsigned long size)
64881 {
64882 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64883@@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64884 * The resulting memory area is 32bit addressable and zeroed so it can be
64885 * mapped to userspace without leaking data.
64886 */
64887+#undef vmalloc_32_user
64888 void *vmalloc_32_user(unsigned long size)
64889 {
64890 struct vm_struct *area;
64891@@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64892 unsigned long uaddr = vma->vm_start;
64893 unsigned long usize = vma->vm_end - vma->vm_start;
64894
64895+ BUG_ON(vma->vm_mirror);
64896+
64897 if ((PAGE_SIZE-1) & (unsigned long)addr)
64898 return -EINVAL;
64899
64900diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64901--- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64902+++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64903@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64904 *
64905 * vm_stat contains the global counters
64906 */
64907-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64908+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64909 EXPORT_SYMBOL(vm_stat);
64910
64911 #ifdef CONFIG_SMP
64912@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64913 v = p->vm_stat_diff[i];
64914 p->vm_stat_diff[i] = 0;
64915 local_irq_restore(flags);
64916- atomic_long_add(v, &zone->vm_stat[i]);
64917+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64918 global_diff[i] += v;
64919 #ifdef CONFIG_NUMA
64920 /* 3 seconds idle till flush */
64921@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64922
64923 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64924 if (global_diff[i])
64925- atomic_long_add(global_diff[i], &vm_stat[i]);
64926+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64927 }
64928
64929 #endif
64930@@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64931 start_cpu_timer(cpu);
64932 #endif
64933 #ifdef CONFIG_PROC_FS
64934- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64935- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64936- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64937- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64938+ {
64939+ mode_t gr_mode = S_IRUGO;
64940+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64941+ gr_mode = S_IRUSR;
64942+#endif
64943+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64944+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64945+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64946+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64947+#else
64948+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64949+#endif
64950+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64951+ }
64952 #endif
64953 return 0;
64954 }
64955diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64956--- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64957+++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64958@@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64959 err = -EPERM;
64960 if (!capable(CAP_NET_ADMIN))
64961 break;
64962- if ((args.u.name_type >= 0) &&
64963- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64964+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64965 struct vlan_net *vn;
64966
64967 vn = net_generic(net, vlan_net_id);
64968diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64969--- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64970+++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64971@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64972 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64973 return 1;
64974 atm_return(vcc, truesize);
64975- atomic_inc(&vcc->stats->rx_drop);
64976+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64977 return 0;
64978 }
64979 EXPORT_SYMBOL(atm_charge);
64980@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64981 }
64982 }
64983 atm_return(vcc, guess);
64984- atomic_inc(&vcc->stats->rx_drop);
64985+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64986 return NULL;
64987 }
64988 EXPORT_SYMBOL(atm_alloc_charge);
64989@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64990
64991 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64992 {
64993-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64994+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64995 __SONET_ITEMS
64996 #undef __HANDLE_ITEM
64997 }
64998@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64999
65000 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65001 {
65002-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65003+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
65004 __SONET_ITEMS
65005 #undef __HANDLE_ITEM
65006 }
65007diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
65008--- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
65009+++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
65010@@ -48,7 +48,7 @@ struct lane2_ops {
65011 const u8 *tlvs, u32 sizeoftlvs);
65012 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
65013 const u8 *tlvs, u32 sizeoftlvs);
65014-};
65015+} __no_const;
65016
65017 /*
65018 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
65019diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
65020--- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
65021+++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
65022@@ -33,7 +33,7 @@ struct mpoa_client {
65023 struct mpc_parameters parameters; /* parameters for this client */
65024
65025 const struct net_device_ops *old_ops;
65026- struct net_device_ops new_ops;
65027+ net_device_ops_no_const new_ops;
65028 };
65029
65030
65031diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
65032--- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
65033+++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
65034@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
65035 struct timeval now;
65036 struct k_message msg;
65037
65038+ pax_track_stack();
65039+
65040 do_gettimeofday(&now);
65041
65042 read_lock_bh(&client->ingress_lock);
65043diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
65044--- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
65045+++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
65046@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
65047 const struct k_atm_aal_stats *stats)
65048 {
65049 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
65050- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
65051- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
65052- atomic_read(&stats->rx_drop));
65053+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
65054+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
65055+ atomic_read_unchecked(&stats->rx_drop));
65056 }
65057
65058 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
65059@@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
65060 {
65061 struct sock *sk = sk_atm(vcc);
65062
65063+#ifdef CONFIG_GRKERNSEC_HIDESYM
65064+ seq_printf(seq, "%p ", NULL);
65065+#else
65066 seq_printf(seq, "%p ", vcc);
65067+#endif
65068+
65069 if (!vcc->dev)
65070 seq_printf(seq, "Unassigned ");
65071 else
65072@@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
65073 {
65074 if (!vcc->dev)
65075 seq_printf(seq, sizeof(void *) == 4 ?
65076+#ifdef CONFIG_GRKERNSEC_HIDESYM
65077+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
65078+#else
65079 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
65080+#endif
65081 else
65082 seq_printf(seq, "%3d %3d %5d ",
65083 vcc->dev->number, vcc->vpi, vcc->vci);
65084diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
65085--- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
65086+++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
65087@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
65088 static void copy_aal_stats(struct k_atm_aal_stats *from,
65089 struct atm_aal_stats *to)
65090 {
65091-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65092+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65093 __AAL_STAT_ITEMS
65094 #undef __HANDLE_ITEM
65095 }
65096@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
65097 static void subtract_aal_stats(struct k_atm_aal_stats *from,
65098 struct atm_aal_stats *to)
65099 {
65100-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65101+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
65102 __AAL_STAT_ITEMS
65103 #undef __HANDLE_ITEM
65104 }
65105diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
65106--- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
65107+++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
65108@@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
65109 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
65110 dev_add_pack(&hard_iface->batman_adv_ptype);
65111
65112- atomic_set(&hard_iface->seqno, 1);
65113- atomic_set(&hard_iface->frag_seqno, 1);
65114+ atomic_set_unchecked(&hard_iface->seqno, 1);
65115+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
65116 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
65117 hard_iface->net_dev->name);
65118
65119diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
65120--- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
65121+++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
65122@@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
65123 return;
65124
65125 /* could be changed by schedule_own_packet() */
65126- if_incoming_seqno = atomic_read(&if_incoming->seqno);
65127+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
65128
65129 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
65130
65131diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
65132--- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
65133+++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
65134@@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
65135
65136 /* change sequence number to network order */
65137 batman_packet->seqno =
65138- htonl((uint32_t)atomic_read(&hard_iface->seqno));
65139+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
65140
65141 if (vis_server == VIS_TYPE_SERVER_SYNC)
65142 batman_packet->flags |= VIS_SERVER;
65143@@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
65144 else
65145 batman_packet->gw_flags = 0;
65146
65147- atomic_inc(&hard_iface->seqno);
65148+ atomic_inc_unchecked(&hard_iface->seqno);
65149
65150 slide_own_bcast_window(hard_iface);
65151 send_time = own_send_time(bat_priv);
65152diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65153--- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65154+++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65155@@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65156
65157 /* set broadcast sequence number */
65158 bcast_packet->seqno =
65159- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65160+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65161
65162 add_bcast_packet_to_list(bat_priv, skb);
65163
65164@@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65165 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65166
65167 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65168- atomic_set(&bat_priv->bcast_seqno, 1);
65169+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65170 atomic_set(&bat_priv->hna_local_changed, 0);
65171
65172 bat_priv->primary_if = NULL;
65173diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65174--- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65175+++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65176@@ -38,8 +38,8 @@ struct hard_iface {
65177 int16_t if_num;
65178 char if_status;
65179 struct net_device *net_dev;
65180- atomic_t seqno;
65181- atomic_t frag_seqno;
65182+ atomic_unchecked_t seqno;
65183+ atomic_unchecked_t frag_seqno;
65184 unsigned char *packet_buff;
65185 int packet_len;
65186 struct kobject *hardif_obj;
65187@@ -141,7 +141,7 @@ struct bat_priv {
65188 atomic_t orig_interval; /* uint */
65189 atomic_t hop_penalty; /* uint */
65190 atomic_t log_level; /* uint */
65191- atomic_t bcast_seqno;
65192+ atomic_unchecked_t bcast_seqno;
65193 atomic_t bcast_queue_left;
65194 atomic_t batman_queue_left;
65195 char num_ifaces;
65196diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65197--- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65198+++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65199@@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65200 frag1->flags = UNI_FRAG_HEAD | large_tail;
65201 frag2->flags = large_tail;
65202
65203- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65204+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65205 frag1->seqno = htons(seqno - 1);
65206 frag2->seqno = htons(seqno);
65207
65208diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65209--- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65210+++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65211@@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65212
65213 /* Reject if config buffer is too small. */
65214 len = cmd_len - sizeof(*req);
65215- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65216+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65217 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65218 l2cap_build_conf_rsp(sk, rsp,
65219 L2CAP_CONF_REJECT, flags), rsp);
65220diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65221--- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65222+++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65223@@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65224 break;
65225 }
65226
65227+ memset(&cinfo, 0, sizeof(cinfo));
65228 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65229 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65230
65231diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65232--- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65233+++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65234@@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65235
65236 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65237
65238+ memset(&cinfo, 0, sizeof(cinfo));
65239 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65240 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65241
65242diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65243--- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65244+++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65245@@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65246 nexthdr = ip6h->nexthdr;
65247 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65248
65249- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65250+ if (nexthdr != IPPROTO_ICMPV6)
65251 return 0;
65252
65253 /* Okay, we found ICMPv6 header */
65254diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65255--- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65256+++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65257@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65258 tmp.valid_hooks = t->table->valid_hooks;
65259 }
65260 mutex_unlock(&ebt_mutex);
65261- if (copy_to_user(user, &tmp, *len) != 0){
65262+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65263 BUGPRINT("c2u Didn't work\n");
65264 ret = -EFAULT;
65265 break;
65266@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65267 int ret;
65268 void __user *pos;
65269
65270+ pax_track_stack();
65271+
65272 memset(&tinfo, 0, sizeof(tinfo));
65273
65274 if (cmd == EBT_SO_GET_ENTRIES) {
65275diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65276--- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65277+++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65278@@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65279 #ifdef CONFIG_DEBUG_FS
65280 struct debug_fs_counter {
65281 atomic_t caif_nr_socks;
65282- atomic_t num_connect_req;
65283- atomic_t num_connect_resp;
65284- atomic_t num_connect_fail_resp;
65285- atomic_t num_disconnect;
65286- atomic_t num_remote_shutdown_ind;
65287- atomic_t num_tx_flow_off_ind;
65288- atomic_t num_tx_flow_on_ind;
65289- atomic_t num_rx_flow_off;
65290- atomic_t num_rx_flow_on;
65291+ atomic_unchecked_t num_connect_req;
65292+ atomic_unchecked_t num_connect_resp;
65293+ atomic_unchecked_t num_connect_fail_resp;
65294+ atomic_unchecked_t num_disconnect;
65295+ atomic_unchecked_t num_remote_shutdown_ind;
65296+ atomic_unchecked_t num_tx_flow_off_ind;
65297+ atomic_unchecked_t num_tx_flow_on_ind;
65298+ atomic_unchecked_t num_rx_flow_off;
65299+ atomic_unchecked_t num_rx_flow_on;
65300 };
65301 static struct debug_fs_counter cnt;
65302 #define dbfs_atomic_inc(v) atomic_inc(v)
65303+#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65304 #define dbfs_atomic_dec(v) atomic_dec(v)
65305 #else
65306 #define dbfs_atomic_inc(v)
65307@@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65308 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65309 sk_rcvbuf_lowwater(cf_sk));
65310 set_rx_flow_off(cf_sk);
65311- dbfs_atomic_inc(&cnt.num_rx_flow_off);
65312+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65313 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65314 }
65315
65316@@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65317 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65318 set_rx_flow_off(cf_sk);
65319 pr_debug("sending flow OFF due to rmem_schedule\n");
65320- dbfs_atomic_inc(&cnt.num_rx_flow_off);
65321+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65322 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65323 }
65324 skb->dev = NULL;
65325@@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65326 switch (flow) {
65327 case CAIF_CTRLCMD_FLOW_ON_IND:
65328 /* OK from modem to start sending again */
65329- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65330+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65331 set_tx_flow_on(cf_sk);
65332 cf_sk->sk.sk_state_change(&cf_sk->sk);
65333 break;
65334
65335 case CAIF_CTRLCMD_FLOW_OFF_IND:
65336 /* Modem asks us to shut up */
65337- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65338+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65339 set_tx_flow_off(cf_sk);
65340 cf_sk->sk.sk_state_change(&cf_sk->sk);
65341 break;
65342
65343 case CAIF_CTRLCMD_INIT_RSP:
65344 /* We're now connected */
65345- dbfs_atomic_inc(&cnt.num_connect_resp);
65346+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65347 cf_sk->sk.sk_state = CAIF_CONNECTED;
65348 set_tx_flow_on(cf_sk);
65349 cf_sk->sk.sk_state_change(&cf_sk->sk);
65350@@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65351
65352 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65353 /* Connect request failed */
65354- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65355+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65356 cf_sk->sk.sk_err = ECONNREFUSED;
65357 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65358 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65359@@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65360
65361 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65362 /* Modem has closed this connection, or device is down. */
65363- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65364+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65365 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65366 cf_sk->sk.sk_err = ECONNRESET;
65367 set_rx_flow_on(cf_sk);
65368@@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65369 return;
65370
65371 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65372- dbfs_atomic_inc(&cnt.num_rx_flow_on);
65373+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65374 set_rx_flow_on(cf_sk);
65375 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65376 }
65377@@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65378 /*ifindex = id of the interface.*/
65379 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65380
65381- dbfs_atomic_inc(&cnt.num_connect_req);
65382+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65383 cf_sk->layer.receive = caif_sktrecv_cb;
65384 err = caif_connect_client(&cf_sk->conn_req,
65385 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65386@@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65387 spin_unlock(&sk->sk_receive_queue.lock);
65388 sock->sk = NULL;
65389
65390- dbfs_atomic_inc(&cnt.num_disconnect);
65391+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65392
65393 if (cf_sk->debugfs_socket_dir != NULL)
65394 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65395diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65396--- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65397+++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65398@@ -9,6 +9,7 @@
65399 #include <linux/stddef.h>
65400 #include <linux/spinlock.h>
65401 #include <linux/slab.h>
65402+#include <linux/sched.h>
65403 #include <net/caif/caif_layer.h>
65404 #include <net/caif/cfpkt.h>
65405 #include <net/caif/cfctrl.h>
65406@@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65407 dev_info.id = 0xff;
65408 memset(this, 0, sizeof(*this));
65409 cfsrvl_init(&this->serv, 0, &dev_info, false);
65410- atomic_set(&this->req_seq_no, 1);
65411- atomic_set(&this->rsp_seq_no, 1);
65412+ atomic_set_unchecked(&this->req_seq_no, 1);
65413+ atomic_set_unchecked(&this->rsp_seq_no, 1);
65414 this->serv.layer.receive = cfctrl_recv;
65415 sprintf(this->serv.layer.name, "ctrl");
65416 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65417@@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65418 struct cfctrl_request_info *req)
65419 {
65420 spin_lock(&ctrl->info_list_lock);
65421- atomic_inc(&ctrl->req_seq_no);
65422- req->sequence_no = atomic_read(&ctrl->req_seq_no);
65423+ atomic_inc_unchecked(&ctrl->req_seq_no);
65424+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65425 list_add_tail(&req->list, &ctrl->list);
65426 spin_unlock(&ctrl->info_list_lock);
65427 }
65428@@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65429 if (p != first)
65430 pr_warn("Requests are not received in order\n");
65431
65432- atomic_set(&ctrl->rsp_seq_no,
65433+ atomic_set_unchecked(&ctrl->rsp_seq_no,
65434 p->sequence_no);
65435 list_del(&p->list);
65436 goto out;
65437@@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65438 struct cfctrl *cfctrl = container_obj(layer);
65439 struct cfctrl_request_info rsp, *req;
65440
65441+ pax_track_stack();
65442
65443 cfpkt_extr_head(pkt, &cmdrsp, 1);
65444 cmd = cmdrsp & CFCTRL_CMD_MASK;
65445diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65446--- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65447+++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65448@@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65449 struct bcm_sock *bo = bcm_sk(sk);
65450 struct bcm_op *op;
65451
65452+#ifdef CONFIG_GRKERNSEC_HIDESYM
65453+ seq_printf(m, ">>> socket %p", NULL);
65454+ seq_printf(m, " / sk %p", NULL);
65455+ seq_printf(m, " / bo %p", NULL);
65456+#else
65457 seq_printf(m, ">>> socket %p", sk->sk_socket);
65458 seq_printf(m, " / sk %p", sk);
65459 seq_printf(m, " / bo %p", bo);
65460+#endif
65461 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65462 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65463 seq_printf(m, " <<<\n");
65464diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65465--- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65466+++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65467@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65468 }
65469
65470 kfree_skb(skb);
65471- atomic_inc(&sk->sk_drops);
65472+ atomic_inc_unchecked(&sk->sk_drops);
65473 sk_mem_reclaim_partial(sk);
65474
65475 return err;
65476diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65477--- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65478+++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65479@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65480 if (no_module && capable(CAP_NET_ADMIN))
65481 no_module = request_module("netdev-%s", name);
65482 if (no_module && capable(CAP_SYS_MODULE)) {
65483+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65484+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
65485+#else
65486 if (!request_module("%s", name))
65487 pr_err("Loading kernel module for a network device "
65488 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65489 "instead\n", name);
65490+#endif
65491 }
65492 }
65493 EXPORT_SYMBOL(dev_load);
65494@@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65495
65496 struct dev_gso_cb {
65497 void (*destructor)(struct sk_buff *skb);
65498-};
65499+} __no_const;
65500
65501 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65502
65503@@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65504 }
65505 EXPORT_SYMBOL(netif_rx_ni);
65506
65507-static void net_tx_action(struct softirq_action *h)
65508+static void net_tx_action(void)
65509 {
65510 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65511
65512@@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65513 }
65514 EXPORT_SYMBOL(netif_napi_del);
65515
65516-static void net_rx_action(struct softirq_action *h)
65517+static void net_rx_action(void)
65518 {
65519 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65520 unsigned long time_limit = jiffies + 2;
65521diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65522--- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65523+++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65524@@ -60,7 +60,7 @@ struct flow_cache {
65525 struct timer_list rnd_timer;
65526 };
65527
65528-atomic_t flow_cache_genid = ATOMIC_INIT(0);
65529+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65530 EXPORT_SYMBOL(flow_cache_genid);
65531 static struct flow_cache flow_cache_global;
65532 static struct kmem_cache *flow_cachep __read_mostly;
65533@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65534
65535 static int flow_entry_valid(struct flow_cache_entry *fle)
65536 {
65537- if (atomic_read(&flow_cache_genid) != fle->genid)
65538+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65539 return 0;
65540 if (fle->object && !fle->object->ops->check(fle->object))
65541 return 0;
65542@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65543 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65544 fcp->hash_count++;
65545 }
65546- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65547+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65548 flo = fle->object;
65549 if (!flo)
65550 goto ret_object;
65551@@ -274,7 +274,7 @@ nocache:
65552 }
65553 flo = resolver(net, key, family, dir, flo, ctx);
65554 if (fle) {
65555- fle->genid = atomic_read(&flow_cache_genid);
65556+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
65557 if (!IS_ERR(flo))
65558 fle->object = flo;
65559 else
65560diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65561--- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65562+++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65563@@ -56,7 +56,7 @@
65564 struct rtnl_link {
65565 rtnl_doit_func doit;
65566 rtnl_dumpit_func dumpit;
65567-};
65568+} __no_const;
65569
65570 static DEFINE_MUTEX(rtnl_mutex);
65571
65572diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65573--- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65574+++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65575@@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65576 struct sock *sk = skb->sk;
65577 int ret = 0;
65578
65579+ pax_track_stack();
65580+
65581 if (splice_grow_spd(pipe, &spd))
65582 return -ENOMEM;
65583
65584diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65585--- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65586+++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65587@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65588 */
65589 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65590 (unsigned)sk->sk_rcvbuf) {
65591- atomic_inc(&sk->sk_drops);
65592+ atomic_inc_unchecked(&sk->sk_drops);
65593 return -ENOMEM;
65594 }
65595
65596@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65597 return err;
65598
65599 if (!sk_rmem_schedule(sk, skb->truesize)) {
65600- atomic_inc(&sk->sk_drops);
65601+ atomic_inc_unchecked(&sk->sk_drops);
65602 return -ENOBUFS;
65603 }
65604
65605@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65606 skb_dst_force(skb);
65607
65608 spin_lock_irqsave(&list->lock, flags);
65609- skb->dropcount = atomic_read(&sk->sk_drops);
65610+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65611 __skb_queue_tail(list, skb);
65612 spin_unlock_irqrestore(&list->lock, flags);
65613
65614@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65615 skb->dev = NULL;
65616
65617 if (sk_rcvqueues_full(sk, skb)) {
65618- atomic_inc(&sk->sk_drops);
65619+ atomic_inc_unchecked(&sk->sk_drops);
65620 goto discard_and_relse;
65621 }
65622 if (nested)
65623@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65624 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65625 } else if (sk_add_backlog(sk, skb)) {
65626 bh_unlock_sock(sk);
65627- atomic_inc(&sk->sk_drops);
65628+ atomic_inc_unchecked(&sk->sk_drops);
65629 goto discard_and_relse;
65630 }
65631
65632@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65633 return -ENOTCONN;
65634 if (lv < len)
65635 return -EINVAL;
65636- if (copy_to_user(optval, address, len))
65637+ if (len > sizeof(address) || copy_to_user(optval, address, len))
65638 return -EFAULT;
65639 goto lenout;
65640 }
65641@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65642
65643 if (len > lv)
65644 len = lv;
65645- if (copy_to_user(optval, &v, len))
65646+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
65647 return -EFAULT;
65648 lenout:
65649 if (put_user(len, optlen))
65650@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65651 */
65652 smp_wmb();
65653 atomic_set(&sk->sk_refcnt, 1);
65654- atomic_set(&sk->sk_drops, 0);
65655+ atomic_set_unchecked(&sk->sk_drops, 0);
65656 }
65657 EXPORT_SYMBOL(sock_init_data);
65658
65659diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65660--- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65661+++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65662@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65663
65664 if (len > *lenp) len = *lenp;
65665
65666- if (copy_to_user(buffer, addr, len))
65667+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
65668 return -EFAULT;
65669
65670 *lenp = len;
65671@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65672
65673 if (len > *lenp) len = *lenp;
65674
65675- if (copy_to_user(buffer, devname, len))
65676+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
65677 return -EFAULT;
65678
65679 *lenp = len;
65680diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65681--- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65682+++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65683@@ -4,7 +4,7 @@
65684
65685 config ECONET
65686 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65687- depends on EXPERIMENTAL && INET
65688+ depends on EXPERIMENTAL && INET && BROKEN
65689 ---help---
65690 Econet is a fairly old and slow networking protocol mainly used by
65691 Acorn computers to access file and print servers. It uses native
65692diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65693--- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65694+++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65695@@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65696 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65697 fib_sync_up(dev);
65698 #endif
65699- atomic_inc(&net->ipv4.dev_addr_genid);
65700+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65701 rt_cache_flush(dev_net(dev), -1);
65702 break;
65703 case NETDEV_DOWN:
65704 fib_del_ifaddr(ifa, NULL);
65705- atomic_inc(&net->ipv4.dev_addr_genid);
65706+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65707 if (ifa->ifa_dev->ifa_list == NULL) {
65708 /* Last address was deleted from this interface.
65709 * Disable IP.
65710@@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65711 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65712 fib_sync_up(dev);
65713 #endif
65714- atomic_inc(&net->ipv4.dev_addr_genid);
65715+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65716 rt_cache_flush(dev_net(dev), -1);
65717 break;
65718 case NETDEV_DOWN:
65719diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65720--- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65721+++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65722@@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65723 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65724 nh->nh_gw,
65725 nh->nh_parent->fib_scope);
65726- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65727+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65728
65729 return nh->nh_saddr;
65730 }
65731diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65732--- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65733+++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65734@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65735 r->idiag_retrans = 0;
65736
65737 r->id.idiag_if = sk->sk_bound_dev_if;
65738+
65739+#ifdef CONFIG_GRKERNSEC_HIDESYM
65740+ r->id.idiag_cookie[0] = 0;
65741+ r->id.idiag_cookie[1] = 0;
65742+#else
65743 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65744 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65745+#endif
65746
65747 r->id.idiag_sport = inet->inet_sport;
65748 r->id.idiag_dport = inet->inet_dport;
65749@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65750 r->idiag_family = tw->tw_family;
65751 r->idiag_retrans = 0;
65752 r->id.idiag_if = tw->tw_bound_dev_if;
65753+
65754+#ifdef CONFIG_GRKERNSEC_HIDESYM
65755+ r->id.idiag_cookie[0] = 0;
65756+ r->id.idiag_cookie[1] = 0;
65757+#else
65758 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65759 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65760+#endif
65761+
65762 r->id.idiag_sport = tw->tw_sport;
65763 r->id.idiag_dport = tw->tw_dport;
65764 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65765@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65766 if (sk == NULL)
65767 goto unlock;
65768
65769+#ifndef CONFIG_GRKERNSEC_HIDESYM
65770 err = -ESTALE;
65771 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65772 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65773 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65774 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65775 goto out;
65776+#endif
65777
65778 err = -ENOMEM;
65779 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65780@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65781 r->idiag_retrans = req->retrans;
65782
65783 r->id.idiag_if = sk->sk_bound_dev_if;
65784+
65785+#ifdef CONFIG_GRKERNSEC_HIDESYM
65786+ r->id.idiag_cookie[0] = 0;
65787+ r->id.idiag_cookie[1] = 0;
65788+#else
65789 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65790 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65791+#endif
65792
65793 tmo = req->expires - jiffies;
65794 if (tmo < 0)
65795diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65796--- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65797+++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65798@@ -18,11 +18,14 @@
65799 #include <linux/sched.h>
65800 #include <linux/slab.h>
65801 #include <linux/wait.h>
65802+#include <linux/security.h>
65803
65804 #include <net/inet_connection_sock.h>
65805 #include <net/inet_hashtables.h>
65806 #include <net/ip.h>
65807
65808+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65809+
65810 /*
65811 * Allocate and initialize a new local port bind bucket.
65812 * The bindhash mutex for snum's hash chain must be held here.
65813@@ -529,6 +532,8 @@ ok:
65814 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65815 spin_unlock(&head->lock);
65816
65817+ gr_update_task_in_ip_table(current, inet_sk(sk));
65818+
65819 if (tw) {
65820 inet_twsk_deschedule(tw, death_row);
65821 while (twrefcnt) {
65822diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65823--- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65824+++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65825@@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65826 unsigned int sequence;
65827 int invalidated, newrefcnt = 0;
65828
65829+ pax_track_stack();
65830+
65831 /* Look up for the address quickly, lockless.
65832 * Because of a concurrent writer, we might not find an existing entry.
65833 */
65834@@ -516,8 +518,8 @@ found: /* The existing node has been fo
65835 if (p) {
65836 p->daddr = *daddr;
65837 atomic_set(&p->refcnt, 1);
65838- atomic_set(&p->rid, 0);
65839- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65840+ atomic_set_unchecked(&p->rid, 0);
65841+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65842 p->tcp_ts_stamp = 0;
65843 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65844 p->rate_tokens = 0;
65845diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65846--- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65847+++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65848@@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65849 return 0;
65850
65851 start = qp->rid;
65852- end = atomic_inc_return(&peer->rid);
65853+ end = atomic_inc_return_unchecked(&peer->rid);
65854 qp->rid = end;
65855
65856 rc = qp->q.fragments && (end - start) > max;
65857diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65858--- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65859+++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65860@@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65861 int val;
65862 int len;
65863
65864+ pax_track_stack();
65865+
65866 if (level != SOL_IP)
65867 return -EOPNOTSUPP;
65868
65869diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65870--- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65871+++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65872@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65873
65874 *len = 0;
65875
65876- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65877+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65878 if (*octets == NULL) {
65879 if (net_ratelimit())
65880 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65881diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65882--- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65883+++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-14 11:22:59.000000000 -0400
65884@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65885 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65886 {
65887 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65888- atomic_inc(&sk->sk_drops);
65889+ atomic_inc_unchecked(&sk->sk_drops);
65890 kfree_skb(skb);
65891 return NET_RX_DROP;
65892 }
65893@@ -730,16 +730,20 @@ static int raw_init(struct sock *sk)
65894
65895 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65896 {
65897+ struct icmp_filter filter;
65898+
65899 if (optlen > sizeof(struct icmp_filter))
65900 optlen = sizeof(struct icmp_filter);
65901- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65902+ if (copy_from_user(&filter, optval, optlen))
65903 return -EFAULT;
65904+ raw_sk(sk)->filter = filter;
65905 return 0;
65906 }
65907
65908 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65909 {
65910 int len, ret = -EFAULT;
65911+ struct icmp_filter filter;
65912
65913 if (get_user(len, optlen))
65914 goto out;
65915@@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65916 if (len > sizeof(struct icmp_filter))
65917 len = sizeof(struct icmp_filter);
65918 ret = -EFAULT;
65919- if (put_user(len, optlen) ||
65920- copy_to_user(optval, &raw_sk(sk)->filter, len))
65921+ filter = raw_sk(sk)->filter;
65922+ if (put_user(len, optlen) || len > sizeof filter ||
65923+ copy_to_user(optval, &filter, len))
65924 goto out;
65925 ret = 0;
65926 out: return ret;
65927@@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65928 sk_wmem_alloc_get(sp),
65929 sk_rmem_alloc_get(sp),
65930 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65931- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65932+ atomic_read(&sp->sk_refcnt),
65933+#ifdef CONFIG_GRKERNSEC_HIDESYM
65934+ NULL,
65935+#else
65936+ sp,
65937+#endif
65938+ atomic_read_unchecked(&sp->sk_drops));
65939 }
65940
65941 static int raw_seq_show(struct seq_file *seq, void *v)
65942diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65943--- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65944+++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65945@@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65946
65947 static inline int rt_genid(struct net *net)
65948 {
65949- return atomic_read(&net->ipv4.rt_genid);
65950+ return atomic_read_unchecked(&net->ipv4.rt_genid);
65951 }
65952
65953 #ifdef CONFIG_PROC_FS
65954@@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65955 unsigned char shuffle;
65956
65957 get_random_bytes(&shuffle, sizeof(shuffle));
65958- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65959+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65960 }
65961
65962 /*
65963@@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65964 rt->peer->pmtu_expires - jiffies : 0;
65965 if (rt->peer) {
65966 inet_peer_refcheck(rt->peer);
65967- id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65968+ id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65969 if (rt->peer->tcp_ts_stamp) {
65970 ts = rt->peer->tcp_ts;
65971 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65972diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
65973--- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
65974+++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
65975@@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
65976 int val;
65977 int err = 0;
65978
65979+ pax_track_stack();
65980+
65981 /* These are data/string values, all the others are ints */
65982 switch (optname) {
65983 case TCP_CONGESTION: {
65984@@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
65985 struct tcp_sock *tp = tcp_sk(sk);
65986 int val, len;
65987
65988+ pax_track_stack();
65989+
65990 if (get_user(len, optlen))
65991 return -EFAULT;
65992
65993diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
65994--- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
65995+++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
65996@@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65997 int sysctl_tcp_low_latency __read_mostly;
65998 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65999
66000+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66001+extern int grsec_enable_blackhole;
66002+#endif
66003
66004 #ifdef CONFIG_TCP_MD5SIG
66005 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
66006@@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
66007 return 0;
66008
66009 reset:
66010+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66011+ if (!grsec_enable_blackhole)
66012+#endif
66013 tcp_v4_send_reset(rsk, skb);
66014 discard:
66015 kfree_skb(skb);
66016@@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
66017 TCP_SKB_CB(skb)->sacked = 0;
66018
66019 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66020- if (!sk)
66021+ if (!sk) {
66022+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66023+ ret = 1;
66024+#endif
66025 goto no_tcp_socket;
66026-
66027+ }
66028 process:
66029- if (sk->sk_state == TCP_TIME_WAIT)
66030+ if (sk->sk_state == TCP_TIME_WAIT) {
66031+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66032+ ret = 2;
66033+#endif
66034 goto do_time_wait;
66035+ }
66036
66037 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
66038 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66039@@ -1711,6 +1724,10 @@ no_tcp_socket:
66040 bad_packet:
66041 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66042 } else {
66043+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66044+ if (!grsec_enable_blackhole || (ret == 1 &&
66045+ (skb->dev->flags & IFF_LOOPBACK)))
66046+#endif
66047 tcp_v4_send_reset(NULL, skb);
66048 }
66049
66050@@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
66051 0, /* non standard timer */
66052 0, /* open_requests have no inode */
66053 atomic_read(&sk->sk_refcnt),
66054+#ifdef CONFIG_GRKERNSEC_HIDESYM
66055+ NULL,
66056+#else
66057 req,
66058+#endif
66059 len);
66060 }
66061
66062@@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
66063 sock_i_uid(sk),
66064 icsk->icsk_probes_out,
66065 sock_i_ino(sk),
66066- atomic_read(&sk->sk_refcnt), sk,
66067+ atomic_read(&sk->sk_refcnt),
66068+#ifdef CONFIG_GRKERNSEC_HIDESYM
66069+ NULL,
66070+#else
66071+ sk,
66072+#endif
66073 jiffies_to_clock_t(icsk->icsk_rto),
66074 jiffies_to_clock_t(icsk->icsk_ack.ato),
66075 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
66076@@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
66077 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
66078 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
66079 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66080- atomic_read(&tw->tw_refcnt), tw, len);
66081+ atomic_read(&tw->tw_refcnt),
66082+#ifdef CONFIG_GRKERNSEC_HIDESYM
66083+ NULL,
66084+#else
66085+ tw,
66086+#endif
66087+ len);
66088 }
66089
66090 #define TMPSZ 150
66091diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
66092--- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
66093+++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
66094@@ -27,6 +27,10 @@
66095 #include <net/inet_common.h>
66096 #include <net/xfrm.h>
66097
66098+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66099+extern int grsec_enable_blackhole;
66100+#endif
66101+
66102 int sysctl_tcp_syncookies __read_mostly = 1;
66103 EXPORT_SYMBOL(sysctl_tcp_syncookies);
66104
66105@@ -745,6 +749,10 @@ listen_overflow:
66106
66107 embryonic_reset:
66108 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
66109+
66110+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66111+ if (!grsec_enable_blackhole)
66112+#endif
66113 if (!(flg & TCP_FLAG_RST))
66114 req->rsk_ops->send_reset(sk, skb);
66115
66116diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
66117--- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
66118+++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
66119@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
66120 int mss;
66121 int s_data_desired = 0;
66122
66123+ pax_track_stack();
66124+
66125 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
66126 s_data_desired = cvp->s_data_desired;
66127 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
66128diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
66129--- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
66130+++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
66131@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
66132 if (cnt + width >= len)
66133 break;
66134
66135- if (copy_to_user(buf + cnt, tbuf, width))
66136+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
66137 return -EFAULT;
66138 cnt += width;
66139 }
66140diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
66141--- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
66142+++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
66143@@ -22,6 +22,10 @@
66144 #include <linux/gfp.h>
66145 #include <net/tcp.h>
66146
66147+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66148+extern int grsec_lastack_retries;
66149+#endif
66150+
66151 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66152 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66153 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66154@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66155 }
66156 }
66157
66158+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66159+ if ((sk->sk_state == TCP_LAST_ACK) &&
66160+ (grsec_lastack_retries > 0) &&
66161+ (grsec_lastack_retries < retry_until))
66162+ retry_until = grsec_lastack_retries;
66163+#endif
66164+
66165 if (retransmits_timed_out(sk, retry_until,
66166 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66167 /* Has it gone just too far? */
66168diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66169--- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66170+++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66171@@ -86,6 +86,7 @@
66172 #include <linux/types.h>
66173 #include <linux/fcntl.h>
66174 #include <linux/module.h>
66175+#include <linux/security.h>
66176 #include <linux/socket.h>
66177 #include <linux/sockios.h>
66178 #include <linux/igmp.h>
66179@@ -107,6 +108,10 @@
66180 #include <net/xfrm.h>
66181 #include "udp_impl.h"
66182
66183+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66184+extern int grsec_enable_blackhole;
66185+#endif
66186+
66187 struct udp_table udp_table __read_mostly;
66188 EXPORT_SYMBOL(udp_table);
66189
66190@@ -564,6 +569,9 @@ found:
66191 return s;
66192 }
66193
66194+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66195+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66196+
66197 /*
66198 * This routine is called by the ICMP module when it gets some
66199 * sort of error condition. If err < 0 then the socket should
66200@@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66201 dport = usin->sin_port;
66202 if (dport == 0)
66203 return -EINVAL;
66204+
66205+ err = gr_search_udp_sendmsg(sk, usin);
66206+ if (err)
66207+ return err;
66208 } else {
66209 if (sk->sk_state != TCP_ESTABLISHED)
66210 return -EDESTADDRREQ;
66211+
66212+ err = gr_search_udp_sendmsg(sk, NULL);
66213+ if (err)
66214+ return err;
66215+
66216 daddr = inet->inet_daddr;
66217 dport = inet->inet_dport;
66218 /* Open fast path for connected socket.
66219@@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66220 udp_lib_checksum_complete(skb)) {
66221 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66222 IS_UDPLITE(sk));
66223- atomic_inc(&sk->sk_drops);
66224+ atomic_inc_unchecked(&sk->sk_drops);
66225 __skb_unlink(skb, rcvq);
66226 __skb_queue_tail(&list_kill, skb);
66227 }
66228@@ -1176,6 +1193,10 @@ try_again:
66229 if (!skb)
66230 goto out;
66231
66232+ err = gr_search_udp_recvmsg(sk, skb);
66233+ if (err)
66234+ goto out_free;
66235+
66236 ulen = skb->len - sizeof(struct udphdr);
66237 if (len > ulen)
66238 len = ulen;
66239@@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66240
66241 drop:
66242 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66243- atomic_inc(&sk->sk_drops);
66244+ atomic_inc_unchecked(&sk->sk_drops);
66245 kfree_skb(skb);
66246 return -1;
66247 }
66248@@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66249 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66250
66251 if (!skb1) {
66252- atomic_inc(&sk->sk_drops);
66253+ atomic_inc_unchecked(&sk->sk_drops);
66254 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66255 IS_UDPLITE(sk));
66256 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66257@@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66258 goto csum_error;
66259
66260 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66261+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66262+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66263+#endif
66264 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66265
66266 /*
66267@@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66268 sk_wmem_alloc_get(sp),
66269 sk_rmem_alloc_get(sp),
66270 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66271- atomic_read(&sp->sk_refcnt), sp,
66272- atomic_read(&sp->sk_drops), len);
66273+ atomic_read(&sp->sk_refcnt),
66274+#ifdef CONFIG_GRKERNSEC_HIDESYM
66275+ NULL,
66276+#else
66277+ sp,
66278+#endif
66279+ atomic_read_unchecked(&sp->sk_drops), len);
66280 }
66281
66282 int udp4_seq_show(struct seq_file *seq, void *v)
66283diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66284--- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66285+++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66286@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66287 #ifdef CONFIG_XFRM
66288 {
66289 struct rt6_info *rt = (struct rt6_info *)dst;
66290- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66291+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66292 }
66293 #endif
66294 }
66295@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66296 #ifdef CONFIG_XFRM
66297 if (dst) {
66298 struct rt6_info *rt = (struct rt6_info *)dst;
66299- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66300+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66301 __sk_dst_reset(sk);
66302 dst = NULL;
66303 }
66304diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66305--- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66306+++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66307@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66308 int val, valbool;
66309 int retv = -ENOPROTOOPT;
66310
66311+ pax_track_stack();
66312+
66313 if (optval == NULL)
66314 val=0;
66315 else {
66316@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66317 int len;
66318 int val;
66319
66320+ pax_track_stack();
66321+
66322 if (ip6_mroute_opt(optname))
66323 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66324
66325diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66326--- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66327+++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-14 11:25:44.000000000 -0400
66328@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66329 {
66330 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66331 skb_checksum_complete(skb)) {
66332- atomic_inc(&sk->sk_drops);
66333+ atomic_inc_unchecked(&sk->sk_drops);
66334 kfree_skb(skb);
66335 return NET_RX_DROP;
66336 }
66337@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66338 struct raw6_sock *rp = raw6_sk(sk);
66339
66340 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66341- atomic_inc(&sk->sk_drops);
66342+ atomic_inc_unchecked(&sk->sk_drops);
66343 kfree_skb(skb);
66344 return NET_RX_DROP;
66345 }
66346@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66347
66348 if (inet->hdrincl) {
66349 if (skb_checksum_complete(skb)) {
66350- atomic_inc(&sk->sk_drops);
66351+ atomic_inc_unchecked(&sk->sk_drops);
66352 kfree_skb(skb);
66353 return NET_RX_DROP;
66354 }
66355@@ -601,7 +601,7 @@ out:
66356 return err;
66357 }
66358
66359-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66360+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66361 struct flowi6 *fl6, struct dst_entry **dstp,
66362 unsigned int flags)
66363 {
66364@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66365 u16 proto;
66366 int err;
66367
66368+ pax_track_stack();
66369+
66370 /* Rough check on arithmetic overflow,
66371 better check is made in ip6_append_data().
66372 */
66373@@ -909,12 +911,15 @@ do_confirm:
66374 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66375 char __user *optval, int optlen)
66376 {
66377+ struct icmp6_filter filter;
66378+
66379 switch (optname) {
66380 case ICMPV6_FILTER:
66381 if (optlen > sizeof(struct icmp6_filter))
66382 optlen = sizeof(struct icmp6_filter);
66383- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66384+ if (copy_from_user(&filter, optval, optlen))
66385 return -EFAULT;
66386+ raw6_sk(sk)->filter = filter;
66387 return 0;
66388 default:
66389 return -ENOPROTOOPT;
66390@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66391 char __user *optval, int __user *optlen)
66392 {
66393 int len;
66394+ struct icmp6_filter filter;
66395
66396 switch (optname) {
66397 case ICMPV6_FILTER:
66398@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66399 len = sizeof(struct icmp6_filter);
66400 if (put_user(len, optlen))
66401 return -EFAULT;
66402- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66403+ filter = raw6_sk(sk)->filter;
66404+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
66405 return -EFAULT;
66406 return 0;
66407 default:
66408@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66409 0, 0L, 0,
66410 sock_i_uid(sp), 0,
66411 sock_i_ino(sp),
66412- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66413+ atomic_read(&sp->sk_refcnt),
66414+#ifdef CONFIG_GRKERNSEC_HIDESYM
66415+ NULL,
66416+#else
66417+ sp,
66418+#endif
66419+ atomic_read_unchecked(&sp->sk_drops));
66420 }
66421
66422 static int raw6_seq_show(struct seq_file *seq, void *v)
66423diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66424--- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66425+++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66426@@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66427 }
66428 #endif
66429
66430+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66431+extern int grsec_enable_blackhole;
66432+#endif
66433+
66434 static void tcp_v6_hash(struct sock *sk)
66435 {
66436 if (sk->sk_state != TCP_CLOSE) {
66437@@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66438 return 0;
66439
66440 reset:
66441+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66442+ if (!grsec_enable_blackhole)
66443+#endif
66444 tcp_v6_send_reset(sk, skb);
66445 discard:
66446 if (opt_skb)
66447@@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66448 TCP_SKB_CB(skb)->sacked = 0;
66449
66450 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66451- if (!sk)
66452+ if (!sk) {
66453+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66454+ ret = 1;
66455+#endif
66456 goto no_tcp_socket;
66457+ }
66458
66459 process:
66460- if (sk->sk_state == TCP_TIME_WAIT)
66461+ if (sk->sk_state == TCP_TIME_WAIT) {
66462+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66463+ ret = 2;
66464+#endif
66465 goto do_time_wait;
66466+ }
66467
66468 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66469 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66470@@ -1792,6 +1807,10 @@ no_tcp_socket:
66471 bad_packet:
66472 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66473 } else {
66474+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66475+ if (!grsec_enable_blackhole || (ret == 1 &&
66476+ (skb->dev->flags & IFF_LOOPBACK)))
66477+#endif
66478 tcp_v6_send_reset(NULL, skb);
66479 }
66480
66481@@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66482 uid,
66483 0, /* non standard timer */
66484 0, /* open_requests have no inode */
66485- 0, req);
66486+ 0,
66487+#ifdef CONFIG_GRKERNSEC_HIDESYM
66488+ NULL
66489+#else
66490+ req
66491+#endif
66492+ );
66493 }
66494
66495 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66496@@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66497 sock_i_uid(sp),
66498 icsk->icsk_probes_out,
66499 sock_i_ino(sp),
66500- atomic_read(&sp->sk_refcnt), sp,
66501+ atomic_read(&sp->sk_refcnt),
66502+#ifdef CONFIG_GRKERNSEC_HIDESYM
66503+ NULL,
66504+#else
66505+ sp,
66506+#endif
66507 jiffies_to_clock_t(icsk->icsk_rto),
66508 jiffies_to_clock_t(icsk->icsk_ack.ato),
66509 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66510@@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66511 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66512 tw->tw_substate, 0, 0,
66513 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66514- atomic_read(&tw->tw_refcnt), tw);
66515+ atomic_read(&tw->tw_refcnt),
66516+#ifdef CONFIG_GRKERNSEC_HIDESYM
66517+ NULL
66518+#else
66519+ tw
66520+#endif
66521+ );
66522 }
66523
66524 static int tcp6_seq_show(struct seq_file *seq, void *v)
66525diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66526--- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66527+++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66528@@ -50,6 +50,10 @@
66529 #include <linux/seq_file.h>
66530 #include "udp_impl.h"
66531
66532+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66533+extern int grsec_enable_blackhole;
66534+#endif
66535+
66536 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66537 {
66538 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66539@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66540
66541 return 0;
66542 drop:
66543- atomic_inc(&sk->sk_drops);
66544+ atomic_inc_unchecked(&sk->sk_drops);
66545 drop_no_sk_drops_inc:
66546 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66547 kfree_skb(skb);
66548@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66549 continue;
66550 }
66551 drop:
66552- atomic_inc(&sk->sk_drops);
66553+ atomic_inc_unchecked(&sk->sk_drops);
66554 UDP6_INC_STATS_BH(sock_net(sk),
66555 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66556 UDP6_INC_STATS_BH(sock_net(sk),
66557@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66558 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66559 proto == IPPROTO_UDPLITE);
66560
66561+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66562+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66563+#endif
66564 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66565
66566 kfree_skb(skb);
66567@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66568 if (!sock_owned_by_user(sk))
66569 udpv6_queue_rcv_skb(sk, skb);
66570 else if (sk_add_backlog(sk, skb)) {
66571- atomic_inc(&sk->sk_drops);
66572+ atomic_inc_unchecked(&sk->sk_drops);
66573 bh_unlock_sock(sk);
66574 sock_put(sk);
66575 goto discard;
66576@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66577 0, 0L, 0,
66578 sock_i_uid(sp), 0,
66579 sock_i_ino(sp),
66580- atomic_read(&sp->sk_refcnt), sp,
66581- atomic_read(&sp->sk_drops));
66582+ atomic_read(&sp->sk_refcnt),
66583+#ifdef CONFIG_GRKERNSEC_HIDESYM
66584+ NULL,
66585+#else
66586+ sp,
66587+#endif
66588+ atomic_read_unchecked(&sp->sk_drops));
66589 }
66590
66591 int udp6_seq_show(struct seq_file *seq, void *v)
66592diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66593--- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66594+++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66595@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66596 add_wait_queue(&self->open_wait, &wait);
66597
66598 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66599- __FILE__,__LINE__, tty->driver->name, self->open_count );
66600+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66601
66602 /* As far as I can see, we protect open_count - Jean II */
66603 spin_lock_irqsave(&self->spinlock, flags);
66604 if (!tty_hung_up_p(filp)) {
66605 extra_count = 1;
66606- self->open_count--;
66607+ local_dec(&self->open_count);
66608 }
66609 spin_unlock_irqrestore(&self->spinlock, flags);
66610- self->blocked_open++;
66611+ local_inc(&self->blocked_open);
66612
66613 while (1) {
66614 if (tty->termios->c_cflag & CBAUD) {
66615@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66616 }
66617
66618 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66619- __FILE__,__LINE__, tty->driver->name, self->open_count );
66620+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66621
66622 schedule();
66623 }
66624@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66625 if (extra_count) {
66626 /* ++ is not atomic, so this should be protected - Jean II */
66627 spin_lock_irqsave(&self->spinlock, flags);
66628- self->open_count++;
66629+ local_inc(&self->open_count);
66630 spin_unlock_irqrestore(&self->spinlock, flags);
66631 }
66632- self->blocked_open--;
66633+ local_dec(&self->blocked_open);
66634
66635 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66636- __FILE__,__LINE__, tty->driver->name, self->open_count);
66637+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66638
66639 if (!retval)
66640 self->flags |= ASYNC_NORMAL_ACTIVE;
66641@@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66642 }
66643 /* ++ is not atomic, so this should be protected - Jean II */
66644 spin_lock_irqsave(&self->spinlock, flags);
66645- self->open_count++;
66646+ local_inc(&self->open_count);
66647
66648 tty->driver_data = self;
66649 self->tty = tty;
66650 spin_unlock_irqrestore(&self->spinlock, flags);
66651
66652 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66653- self->line, self->open_count);
66654+ self->line, local_read(&self->open_count));
66655
66656 /* Not really used by us, but lets do it anyway */
66657 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66658@@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66659 return;
66660 }
66661
66662- if ((tty->count == 1) && (self->open_count != 1)) {
66663+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66664 /*
66665 * Uh, oh. tty->count is 1, which means that the tty
66666 * structure will be freed. state->count should always
66667@@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66668 */
66669 IRDA_DEBUG(0, "%s(), bad serial port count; "
66670 "tty->count is 1, state->count is %d\n", __func__ ,
66671- self->open_count);
66672- self->open_count = 1;
66673+ local_read(&self->open_count));
66674+ local_set(&self->open_count, 1);
66675 }
66676
66677- if (--self->open_count < 0) {
66678+ if (local_dec_return(&self->open_count) < 0) {
66679 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66680- __func__, self->line, self->open_count);
66681- self->open_count = 0;
66682+ __func__, self->line, local_read(&self->open_count));
66683+ local_set(&self->open_count, 0);
66684 }
66685- if (self->open_count) {
66686+ if (local_read(&self->open_count)) {
66687 spin_unlock_irqrestore(&self->spinlock, flags);
66688
66689 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66690@@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66691 tty->closing = 0;
66692 self->tty = NULL;
66693
66694- if (self->blocked_open) {
66695+ if (local_read(&self->blocked_open)) {
66696 if (self->close_delay)
66697 schedule_timeout_interruptible(self->close_delay);
66698 wake_up_interruptible(&self->open_wait);
66699@@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66700 spin_lock_irqsave(&self->spinlock, flags);
66701 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66702 self->tty = NULL;
66703- self->open_count = 0;
66704+ local_set(&self->open_count, 0);
66705 spin_unlock_irqrestore(&self->spinlock, flags);
66706
66707 wake_up_interruptible(&self->open_wait);
66708@@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66709 seq_putc(m, '\n');
66710
66711 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66712- seq_printf(m, "Open count: %d\n", self->open_count);
66713+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66714 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66715 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66716
66717diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66718--- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66719+++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66720@@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66721
66722 write_lock_bh(&iucv_sk_list.lock);
66723
66724- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66725+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66726 while (__iucv_get_sock_by_name(name)) {
66727 sprintf(name, "%08x",
66728- atomic_inc_return(&iucv_sk_list.autobind_name));
66729+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66730 }
66731
66732 write_unlock_bh(&iucv_sk_list.lock);
66733diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66734--- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66735+++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66736@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66737 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66738 struct xfrm_kmaddress k;
66739
66740+ pax_track_stack();
66741+
66742 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66743 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66744 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66745@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66746 static u32 get_acqseq(void)
66747 {
66748 u32 res;
66749- static atomic_t acqseq;
66750+ static atomic_unchecked_t acqseq;
66751
66752 do {
66753- res = atomic_inc_return(&acqseq);
66754+ res = atomic_inc_return_unchecked(&acqseq);
66755 } while (!res);
66756 return res;
66757 }
66758@@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66759 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66760 else
66761 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66762+#ifdef CONFIG_GRKERNSEC_HIDESYM
66763+ NULL,
66764+#else
66765 s,
66766+#endif
66767 atomic_read(&s->sk_refcnt),
66768 sk_rmem_alloc_get(s),
66769 sk_wmem_alloc_get(s),
66770diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66771--- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66772+++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66773@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66774 goto out;
66775
66776 lapb->dev = dev;
66777- lapb->callbacks = *callbacks;
66778+ lapb->callbacks = callbacks;
66779
66780 __lapb_insert_cb(lapb);
66781
66782@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66783
66784 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66785 {
66786- if (lapb->callbacks.connect_confirmation)
66787- lapb->callbacks.connect_confirmation(lapb->dev, reason);
66788+ if (lapb->callbacks->connect_confirmation)
66789+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
66790 }
66791
66792 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66793 {
66794- if (lapb->callbacks.connect_indication)
66795- lapb->callbacks.connect_indication(lapb->dev, reason);
66796+ if (lapb->callbacks->connect_indication)
66797+ lapb->callbacks->connect_indication(lapb->dev, reason);
66798 }
66799
66800 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66801 {
66802- if (lapb->callbacks.disconnect_confirmation)
66803- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66804+ if (lapb->callbacks->disconnect_confirmation)
66805+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66806 }
66807
66808 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66809 {
66810- if (lapb->callbacks.disconnect_indication)
66811- lapb->callbacks.disconnect_indication(lapb->dev, reason);
66812+ if (lapb->callbacks->disconnect_indication)
66813+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
66814 }
66815
66816 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66817 {
66818- if (lapb->callbacks.data_indication)
66819- return lapb->callbacks.data_indication(lapb->dev, skb);
66820+ if (lapb->callbacks->data_indication)
66821+ return lapb->callbacks->data_indication(lapb->dev, skb);
66822
66823 kfree_skb(skb);
66824 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66825@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66826 {
66827 int used = 0;
66828
66829- if (lapb->callbacks.data_transmit) {
66830- lapb->callbacks.data_transmit(lapb->dev, skb);
66831+ if (lapb->callbacks->data_transmit) {
66832+ lapb->callbacks->data_transmit(lapb->dev, skb);
66833 used = 1;
66834 }
66835
66836diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66837--- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66838+++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66839@@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66840 struct tid_ampdu_rx *tid_rx;
66841 struct tid_ampdu_tx *tid_tx;
66842
66843+ pax_track_stack();
66844+
66845 rcu_read_lock();
66846
66847 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66848@@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66849 struct sta_info *sta = file->private_data;
66850 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66851
66852+ pax_track_stack();
66853+
66854 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66855 htc->ht_supported ? "" : "not ");
66856 if (htc->ht_supported) {
66857diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66858--- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66859+++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66860@@ -27,6 +27,7 @@
66861 #include <net/ieee80211_radiotap.h>
66862 #include <net/cfg80211.h>
66863 #include <net/mac80211.h>
66864+#include <asm/local.h>
66865 #include "key.h"
66866 #include "sta_info.h"
66867
66868@@ -714,7 +715,7 @@ struct ieee80211_local {
66869 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66870 spinlock_t queue_stop_reason_lock;
66871
66872- int open_count;
66873+ local_t open_count;
66874 int monitors, cooked_mntrs;
66875 /* number of interfaces with corresponding FIF_ flags */
66876 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66877diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66878--- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66879+++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66880@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66881 break;
66882 }
66883
66884- if (local->open_count == 0) {
66885+ if (local_read(&local->open_count) == 0) {
66886 res = drv_start(local);
66887 if (res)
66888 goto err_del_bss;
66889@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66890 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66891
66892 if (!is_valid_ether_addr(dev->dev_addr)) {
66893- if (!local->open_count)
66894+ if (!local_read(&local->open_count))
66895 drv_stop(local);
66896 return -EADDRNOTAVAIL;
66897 }
66898@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66899 mutex_unlock(&local->mtx);
66900
66901 if (coming_up)
66902- local->open_count++;
66903+ local_inc(&local->open_count);
66904
66905 if (hw_reconf_flags) {
66906 ieee80211_hw_config(local, hw_reconf_flags);
66907@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66908 err_del_interface:
66909 drv_remove_interface(local, &sdata->vif);
66910 err_stop:
66911- if (!local->open_count)
66912+ if (!local_read(&local->open_count))
66913 drv_stop(local);
66914 err_del_bss:
66915 sdata->bss = NULL;
66916@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66917 }
66918
66919 if (going_down)
66920- local->open_count--;
66921+ local_dec(&local->open_count);
66922
66923 switch (sdata->vif.type) {
66924 case NL80211_IFTYPE_AP_VLAN:
66925@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66926
66927 ieee80211_recalc_ps(local, -1);
66928
66929- if (local->open_count == 0) {
66930+ if (local_read(&local->open_count) == 0) {
66931 if (local->ops->napi_poll)
66932 napi_disable(&local->napi);
66933 ieee80211_clear_tx_pending(local);
66934diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66935--- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66936+++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66937@@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66938 local->hw.conf.power_level = power;
66939 }
66940
66941- if (changed && local->open_count) {
66942+ if (changed && local_read(&local->open_count)) {
66943 ret = drv_config(local, changed);
66944 /*
66945 * Goal:
66946diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66947--- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66948+++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66949@@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66950 bool have_higher_than_11mbit = false;
66951 u16 ap_ht_cap_flags;
66952
66953+ pax_track_stack();
66954+
66955 /* AssocResp and ReassocResp have identical structure */
66956
66957 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66958diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66959--- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66960+++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66961@@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66962 }
66963
66964 /* stop hardware - this must stop RX */
66965- if (local->open_count)
66966+ if (local_read(&local->open_count))
66967 ieee80211_stop_device(local);
66968
66969 local->suspended = true;
66970diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66971--- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66972+++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
66973@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66974
66975 ASSERT_RTNL();
66976
66977- if (local->open_count)
66978+ if (local_read(&local->open_count))
66979 return -EBUSY;
66980
66981 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66982diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
66983--- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
66984+++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
66985@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66986
66987 spin_unlock_irqrestore(&events->lock, status);
66988
66989- if (copy_to_user(buf, pb, p))
66990+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66991 return -EFAULT;
66992
66993 return p;
66994diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
66995--- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
66996+++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
66997@@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
66998 local->resuming = true;
66999
67000 /* restart hardware */
67001- if (local->open_count) {
67002+ if (local_read(&local->open_count)) {
67003 /*
67004 * Upon resume hardware can sometimes be goofy due to
67005 * various platform / driver / bus issues, so restarting
67006diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
67007--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
67008+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
67009@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
67010 /* Increase the refcnt counter of the dest */
67011 atomic_inc(&dest->refcnt);
67012
67013- conn_flags = atomic_read(&dest->conn_flags);
67014+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
67015 if (cp->protocol != IPPROTO_UDP)
67016 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
67017 /* Bind with the destination and its corresponding transmitter */
67018@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
67019 atomic_set(&cp->refcnt, 1);
67020
67021 atomic_set(&cp->n_control, 0);
67022- atomic_set(&cp->in_pkts, 0);
67023+ atomic_set_unchecked(&cp->in_pkts, 0);
67024
67025 atomic_inc(&ipvs->conn_count);
67026 if (flags & IP_VS_CONN_F_NO_CPORT)
67027@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
67028
67029 /* Don't drop the entry if its number of incoming packets is not
67030 located in [0, 8] */
67031- i = atomic_read(&cp->in_pkts);
67032+ i = atomic_read_unchecked(&cp->in_pkts);
67033 if (i > 8 || i < 0) return 0;
67034
67035 if (!todrop_rate[i]) return 0;
67036diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
67037--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
67038+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
67039@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
67040 ret = cp->packet_xmit(skb, cp, pd->pp);
67041 /* do not touch skb anymore */
67042
67043- atomic_inc(&cp->in_pkts);
67044+ atomic_inc_unchecked(&cp->in_pkts);
67045 ip_vs_conn_put(cp);
67046 return ret;
67047 }
67048@@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
67049 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
67050 pkts = sysctl_sync_threshold(ipvs);
67051 else
67052- pkts = atomic_add_return(1, &cp->in_pkts);
67053+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67054
67055 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
67056 cp->protocol == IPPROTO_SCTP) {
67057diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
67058--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
67059+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
67060@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
67061 ip_vs_rs_hash(ipvs, dest);
67062 write_unlock_bh(&ipvs->rs_lock);
67063 }
67064- atomic_set(&dest->conn_flags, conn_flags);
67065+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
67066
67067 /* bind the service */
67068 if (!dest->svc) {
67069@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
67070 " %-7s %-6d %-10d %-10d\n",
67071 &dest->addr.in6,
67072 ntohs(dest->port),
67073- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67074+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67075 atomic_read(&dest->weight),
67076 atomic_read(&dest->activeconns),
67077 atomic_read(&dest->inactconns));
67078@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
67079 "%-7s %-6d %-10d %-10d\n",
67080 ntohl(dest->addr.ip),
67081 ntohs(dest->port),
67082- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67083+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67084 atomic_read(&dest->weight),
67085 atomic_read(&dest->activeconns),
67086 atomic_read(&dest->inactconns));
67087@@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
67088 struct ip_vs_dest_user *udest_compat;
67089 struct ip_vs_dest_user_kern udest;
67090
67091+ pax_track_stack();
67092+
67093 if (!capable(CAP_NET_ADMIN))
67094 return -EPERM;
67095
67096@@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
67097
67098 entry.addr = dest->addr.ip;
67099 entry.port = dest->port;
67100- entry.conn_flags = atomic_read(&dest->conn_flags);
67101+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
67102 entry.weight = atomic_read(&dest->weight);
67103 entry.u_threshold = dest->u_threshold;
67104 entry.l_threshold = dest->l_threshold;
67105@@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
67106 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
67107
67108 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
67109- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67110+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67111 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
67112 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
67113 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
67114diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
67115--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
67116+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
67117@@ -648,7 +648,7 @@ control:
67118 * i.e only increment in_pkts for Templates.
67119 */
67120 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
67121- int pkts = atomic_add_return(1, &cp->in_pkts);
67122+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67123
67124 if (pkts % sysctl_sync_period(ipvs) != 1)
67125 return;
67126@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
67127
67128 if (opt)
67129 memcpy(&cp->in_seq, opt, sizeof(*opt));
67130- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67131+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67132 cp->state = state;
67133 cp->old_state = cp->state;
67134 /*
67135diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
67136--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
67137+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
67138@@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
67139 else
67140 rc = NF_ACCEPT;
67141 /* do not touch skb anymore */
67142- atomic_inc(&cp->in_pkts);
67143+ atomic_inc_unchecked(&cp->in_pkts);
67144 goto out;
67145 }
67146
67147@@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
67148 else
67149 rc = NF_ACCEPT;
67150 /* do not touch skb anymore */
67151- atomic_inc(&cp->in_pkts);
67152+ atomic_inc_unchecked(&cp->in_pkts);
67153 goto out;
67154 }
67155
67156diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67157--- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67158+++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67159@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67160
67161 To compile it as a module, choose M here. If unsure, say N.
67162
67163+config NETFILTER_XT_MATCH_GRADM
67164+ tristate '"gradm" match support'
67165+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67166+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67167+ ---help---
67168+ The gradm match allows to match on grsecurity RBAC being enabled.
67169+ It is useful when iptables rules are applied early on bootup to
67170+ prevent connections to the machine (except from a trusted host)
67171+ while the RBAC system is disabled.
67172+
67173 config NETFILTER_XT_MATCH_HASHLIMIT
67174 tristate '"hashlimit" match support'
67175 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67176diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67177--- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67178+++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67179@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67180 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67181 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67182 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67183+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67184 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67185 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67186 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67187diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67188--- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67189+++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67190@@ -70,7 +70,7 @@ struct nfulnl_instance {
67191 };
67192
67193 static DEFINE_SPINLOCK(instances_lock);
67194-static atomic_t global_seq;
67195+static atomic_unchecked_t global_seq;
67196
67197 #define INSTANCE_BUCKETS 16
67198 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67199@@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67200 /* global sequence number */
67201 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67202 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67203- htonl(atomic_inc_return(&global_seq)));
67204+ htonl(atomic_inc_return_unchecked(&global_seq)));
67205
67206 if (data_len) {
67207 struct nlattr *nla;
67208diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67209--- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67210+++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67211@@ -58,7 +58,7 @@ struct nfqnl_instance {
67212 */
67213 spinlock_t lock;
67214 unsigned int queue_total;
67215- atomic_t id_sequence; /* 'sequence' of pkt ids */
67216+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67217 struct list_head queue_list; /* packets in queue */
67218 };
67219
67220@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67221 nfmsg->version = NFNETLINK_V0;
67222 nfmsg->res_id = htons(queue->queue_num);
67223
67224- entry->id = atomic_inc_return(&queue->id_sequence);
67225+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67226 pmsg.packet_id = htonl(entry->id);
67227 pmsg.hw_protocol = entskb->protocol;
67228 pmsg.hook = entry->hook;
67229@@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67230 inst->peer_pid, inst->queue_total,
67231 inst->copy_mode, inst->copy_range,
67232 inst->queue_dropped, inst->queue_user_dropped,
67233- atomic_read(&inst->id_sequence), 1);
67234+ atomic_read_unchecked(&inst->id_sequence), 1);
67235 }
67236
67237 static const struct seq_operations nfqnl_seq_ops = {
67238diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67239--- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67240+++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67241@@ -0,0 +1,51 @@
67242+/*
67243+ * gradm match for netfilter
67244