]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.4-201109171832.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201109171832.patch
CommitLineData
5249dc98
PK
1diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2--- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40--- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86--- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245--- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286--- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344--- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358--- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382--- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404--- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430--- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456--- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536--- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587--- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639--- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671--- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757--- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837--- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975--- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039--- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062--- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085--- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109--- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121--- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166--- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185--- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212--- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276--- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314--- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447--- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461--- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715--- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773--- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033--- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075--- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194--- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278--- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411--- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480--- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508--- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555--- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629--- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672--- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684--- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733--- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745--- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757--- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029--- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059+++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366--- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378--- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416--- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798+++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046--- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068--- linux-3.0.4/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069+++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080--- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092--- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399--- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971--- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008--- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020--- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048--- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061--- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112--- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133--- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435--- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533--- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545--- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558--- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570--- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582--- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S
5593--- linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-09-17 18:31:51.000000000 -0400
5595@@ -71,6 +71,12 @@ FUNC: movq r1,r2; \
5596 je B192; \
5597 leaq 32(r9),r9;
5598
5599+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5600+#define ret orb $0x80, 0x7(%rsp); ret
5601+#else
5602+#define ret ret
5603+#endif
5604+
5605 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5606 movq r1,r2; \
5607 movq r3,r4; \
5608diff -urNp linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S
5609--- linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5610+++ linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-09-17 18:31:51.000000000 -0400
5611@@ -790,6 +790,9 @@ ECRYPT_encrypt_bytes:
5612 add %r11,%rsp
5613 mov %rdi,%rax
5614 mov %rsi,%rdx
5615+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5616+ orb $0x80, 0x7(%rsp)
5617+#endif
5618 ret
5619 # bytesatleast65:
5620 ._bytesatleast65:
5621@@ -891,6 +894,9 @@ ECRYPT_keysetup:
5622 add %r11,%rsp
5623 mov %rdi,%rax
5624 mov %rsi,%rdx
5625+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5626+ orb $0x80, 0x7(%rsp)
5627+#endif
5628 ret
5629 # enter ECRYPT_ivsetup
5630 .text
5631@@ -917,4 +923,7 @@ ECRYPT_ivsetup:
5632 add %r11,%rsp
5633 mov %rdi,%rax
5634 mov %rsi,%rdx
5635+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5636+ orb $0x80, 0x7(%rsp)
5637+#endif
5638 ret
5639diff -urNp linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S
5640--- linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5641+++ linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-09-17 18:31:51.000000000 -0400
5642@@ -269,6 +269,9 @@ twofish_enc_blk:
5643
5644 popq R1
5645 movq $1,%rax
5646+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5647+ orb $0x80, 0x7(%rsp)
5648+#endif
5649 ret
5650
5651 twofish_dec_blk:
5652@@ -321,4 +324,7 @@ twofish_dec_blk:
5653
5654 popq R1
5655 movq $1,%rax
5656+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5657+ orb $0x80, 0x7(%rsp)
5658+#endif
5659 ret
5660diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5661--- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5662+++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5663@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5664 unsigned long dump_start, dump_size;
5665 struct user32 dump;
5666
5667+ memset(&dump, 0, sizeof(dump));
5668+
5669 fs = get_fs();
5670 set_fs(KERNEL_DS);
5671 has_dumped = 1;
5672diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5673--- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5674+++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5675@@ -13,6 +13,7 @@
5676 #include <asm/thread_info.h>
5677 #include <asm/segment.h>
5678 #include <asm/irqflags.h>
5679+#include <asm/pgtable.h>
5680 #include <linux/linkage.h>
5681
5682 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5683@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5684 ENDPROC(native_irq_enable_sysexit)
5685 #endif
5686
5687+ .macro pax_enter_kernel_user
5688+#ifdef CONFIG_PAX_MEMORY_UDEREF
5689+ call pax_enter_kernel_user
5690+#endif
5691+ .endm
5692+
5693+ .macro pax_exit_kernel_user
5694+#ifdef CONFIG_PAX_MEMORY_UDEREF
5695+ call pax_exit_kernel_user
5696+#endif
5697+#ifdef CONFIG_PAX_RANDKSTACK
5698+ pushq %rax
5699+ call pax_randomize_kstack
5700+ popq %rax
5701+#endif
5702+ .endm
5703+
5704+ .macro pax_erase_kstack
5705+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5706+ call pax_erase_kstack
5707+#endif
5708+ .endm
5709+
5710 /*
5711 * 32bit SYSENTER instruction entry.
5712 *
5713@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5714 CFI_REGISTER rsp,rbp
5715 SWAPGS_UNSAFE_STACK
5716 movq PER_CPU_VAR(kernel_stack), %rsp
5717- addq $(KERNEL_STACK_OFFSET),%rsp
5718+ pax_enter_kernel_user
5719 /*
5720 * No need to follow this irqs on/off section: the syscall
5721 * disabled irqs, here we enable it straight after entry:
5722@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5723 CFI_REL_OFFSET rsp,0
5724 pushfq_cfi
5725 /*CFI_REL_OFFSET rflags,0*/
5726- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5727+ GET_THREAD_INFO(%r10)
5728+ movl TI_sysenter_return(%r10), %r10d
5729 CFI_REGISTER rip,r10
5730 pushq_cfi $__USER32_CS
5731 /*CFI_REL_OFFSET cs,0*/
5732@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5733 SAVE_ARGS 0,0,1
5734 /* no need to do an access_ok check here because rbp has been
5735 32bit zero extended */
5736+
5737+#ifdef CONFIG_PAX_MEMORY_UDEREF
5738+ mov $PAX_USER_SHADOW_BASE,%r10
5739+ add %r10,%rbp
5740+#endif
5741+
5742 1: movl (%rbp),%ebp
5743 .section __ex_table,"a"
5744 .quad 1b,ia32_badarg
5745@@ -168,6 +199,8 @@ sysenter_dispatch:
5746 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5747 jnz sysexit_audit
5748 sysexit_from_sys_call:
5749+ pax_exit_kernel_user
5750+ pax_erase_kstack
5751 andl $~TS_COMPAT,TI_status(%r10)
5752 /* clear IF, that popfq doesn't enable interrupts early */
5753 andl $~0x200,EFLAGS-R11(%rsp)
5754@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5755 movl %eax,%esi /* 2nd arg: syscall number */
5756 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5757 call audit_syscall_entry
5758+
5759+ pax_erase_kstack
5760+
5761 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5762 cmpq $(IA32_NR_syscalls-1),%rax
5763 ja ia32_badsys
5764@@ -246,6 +282,9 @@ sysenter_tracesys:
5765 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5766 movq %rsp,%rdi /* &pt_regs -> arg1 */
5767 call syscall_trace_enter
5768+
5769+ pax_erase_kstack
5770+
5771 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5772 RESTORE_REST
5773 cmpq $(IA32_NR_syscalls-1),%rax
5774@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5775 ENTRY(ia32_cstar_target)
5776 CFI_STARTPROC32 simple
5777 CFI_SIGNAL_FRAME
5778- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5779+ CFI_DEF_CFA rsp,0
5780 CFI_REGISTER rip,rcx
5781 /*CFI_REGISTER rflags,r11*/
5782 SWAPGS_UNSAFE_STACK
5783 movl %esp,%r8d
5784 CFI_REGISTER rsp,r8
5785 movq PER_CPU_VAR(kernel_stack),%rsp
5786+
5787+#ifdef CONFIG_PAX_MEMORY_UDEREF
5788+ pax_enter_kernel_user
5789+#endif
5790+
5791 /*
5792 * No need to follow this irqs on/off section: the syscall
5793 * disabled irqs and here we enable it straight after entry:
5794 */
5795 ENABLE_INTERRUPTS(CLBR_NONE)
5796- SAVE_ARGS 8,1,1
5797+ SAVE_ARGS 8*6,1,1
5798 movl %eax,%eax /* zero extension */
5799 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5800 movq %rcx,RIP-ARGOFFSET(%rsp)
5801@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5802 /* no need to do an access_ok check here because r8 has been
5803 32bit zero extended */
5804 /* hardware stack frame is complete now */
5805+
5806+#ifdef CONFIG_PAX_MEMORY_UDEREF
5807+ mov $PAX_USER_SHADOW_BASE,%r10
5808+ add %r10,%r8
5809+#endif
5810+
5811 1: movl (%r8),%r9d
5812 .section __ex_table,"a"
5813 .quad 1b,ia32_badarg
5814@@ -327,6 +377,8 @@ cstar_dispatch:
5815 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5816 jnz sysretl_audit
5817 sysretl_from_sys_call:
5818+ pax_exit_kernel_user
5819+ pax_erase_kstack
5820 andl $~TS_COMPAT,TI_status(%r10)
5821 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5822 movl RIP-ARGOFFSET(%rsp),%ecx
5823@@ -364,6 +416,9 @@ cstar_tracesys:
5824 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5825 movq %rsp,%rdi /* &pt_regs -> arg1 */
5826 call syscall_trace_enter
5827+
5828+ pax_erase_kstack
5829+
5830 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5831 RESTORE_REST
5832 xchgl %ebp,%r9d
5833@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5834 CFI_REL_OFFSET rip,RIP-RIP
5835 PARAVIRT_ADJUST_EXCEPTION_FRAME
5836 SWAPGS
5837+ pax_enter_kernel_user
5838 /*
5839 * No need to follow this irqs on/off section: the syscall
5840 * disabled irqs and here we enable it straight after entry:
5841@@ -441,6 +497,9 @@ ia32_tracesys:
5842 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5843 movq %rsp,%rdi /* &pt_regs -> arg1 */
5844 call syscall_trace_enter
5845+
5846+ pax_erase_kstack
5847+
5848 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5849 RESTORE_REST
5850 cmpq $(IA32_NR_syscalls-1),%rax
5851diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5852--- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5853+++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5854@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5855 sp -= frame_size;
5856 /* Align the stack pointer according to the i386 ABI,
5857 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5858- sp = ((sp + 4) & -16ul) - 4;
5859+ sp = ((sp - 12) & -16ul) - 4;
5860 return (void __user *) sp;
5861 }
5862
5863@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5864 * These are actually not used anymore, but left because some
5865 * gdb versions depend on them as a marker.
5866 */
5867- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5868+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5869 } put_user_catch(err);
5870
5871 if (err)
5872@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5873 0xb8,
5874 __NR_ia32_rt_sigreturn,
5875 0x80cd,
5876- 0,
5877+ 0
5878 };
5879
5880 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5881@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5882
5883 if (ka->sa.sa_flags & SA_RESTORER)
5884 restorer = ka->sa.sa_restorer;
5885+ else if (current->mm->context.vdso)
5886+ /* Return stub is in 32bit vsyscall page */
5887+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5888 else
5889- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5890- rt_sigreturn);
5891+ restorer = &frame->retcode;
5892 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5893
5894 /*
5895 * Not actually used anymore, but left because some gdb
5896 * versions need it.
5897 */
5898- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5899+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5900 } put_user_catch(err);
5901
5902 if (err)
5903diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5904--- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5905+++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5906@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5907 ".section .discard,\"aw\",@progbits\n" \
5908 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5909 ".previous\n" \
5910- ".section .altinstr_replacement, \"ax\"\n" \
5911+ ".section .altinstr_replacement, \"a\"\n" \
5912 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5913 ".previous"
5914
5915diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5916--- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5917+++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5918@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5919
5920 #ifdef CONFIG_X86_LOCAL_APIC
5921
5922-extern unsigned int apic_verbosity;
5923+extern int apic_verbosity;
5924 extern int local_apic_timer_c2_ok;
5925
5926 extern int disable_apic;
5927diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5928--- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5929+++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5930@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5931 __asm__ __volatile__(APM_DO_ZERO_SEGS
5932 "pushl %%edi\n\t"
5933 "pushl %%ebp\n\t"
5934- "lcall *%%cs:apm_bios_entry\n\t"
5935+ "lcall *%%ss:apm_bios_entry\n\t"
5936 "setc %%al\n\t"
5937 "popl %%ebp\n\t"
5938 "popl %%edi\n\t"
5939@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5940 __asm__ __volatile__(APM_DO_ZERO_SEGS
5941 "pushl %%edi\n\t"
5942 "pushl %%ebp\n\t"
5943- "lcall *%%cs:apm_bios_entry\n\t"
5944+ "lcall *%%ss:apm_bios_entry\n\t"
5945 "setc %%bl\n\t"
5946 "popl %%ebp\n\t"
5947 "popl %%edi\n\t"
5948diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5949--- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5950+++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5951@@ -12,6 +12,14 @@ typedef struct {
5952 u64 __aligned(8) counter;
5953 } atomic64_t;
5954
5955+#ifdef CONFIG_PAX_REFCOUNT
5956+typedef struct {
5957+ u64 __aligned(8) counter;
5958+} atomic64_unchecked_t;
5959+#else
5960+typedef atomic64_t atomic64_unchecked_t;
5961+#endif
5962+
5963 #define ATOMIC64_INIT(val) { (val) }
5964
5965 #ifdef CONFIG_X86_CMPXCHG64
5966@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5967 }
5968
5969 /**
5970+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5971+ * @p: pointer to type atomic64_unchecked_t
5972+ * @o: expected value
5973+ * @n: new value
5974+ *
5975+ * Atomically sets @v to @n if it was equal to @o and returns
5976+ * the old value.
5977+ */
5978+
5979+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5980+{
5981+ return cmpxchg64(&v->counter, o, n);
5982+}
5983+
5984+/**
5985 * atomic64_xchg - xchg atomic64 variable
5986 * @v: pointer to type atomic64_t
5987 * @n: value to assign
5988@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5989 }
5990
5991 /**
5992+ * atomic64_set_unchecked - set atomic64 variable
5993+ * @v: pointer to type atomic64_unchecked_t
5994+ * @n: value to assign
5995+ *
5996+ * Atomically sets the value of @v to @n.
5997+ */
5998+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5999+{
6000+ unsigned high = (unsigned)(i >> 32);
6001+ unsigned low = (unsigned)i;
6002+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6003+ : "+b" (low), "+c" (high)
6004+ : "S" (v)
6005+ : "eax", "edx", "memory"
6006+ );
6007+}
6008+
6009+/**
6010 * atomic64_read - read atomic64 variable
6011 * @v: pointer to type atomic64_t
6012 *
6013@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6014 }
6015
6016 /**
6017+ * atomic64_read_unchecked - read atomic64 variable
6018+ * @v: pointer to type atomic64_unchecked_t
6019+ *
6020+ * Atomically reads the value of @v and returns it.
6021+ */
6022+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6023+{
6024+ long long r;
6025+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6026+ : "=A" (r), "+c" (v)
6027+ : : "memory"
6028+ );
6029+ return r;
6030+ }
6031+
6032+/**
6033 * atomic64_add_return - add and return
6034 * @i: integer value to add
6035 * @v: pointer to type atomic64_t
6036@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6037 return i;
6038 }
6039
6040+/**
6041+ * atomic64_add_return_unchecked - add and return
6042+ * @i: integer value to add
6043+ * @v: pointer to type atomic64_unchecked_t
6044+ *
6045+ * Atomically adds @i to @v and returns @i + *@v
6046+ */
6047+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6048+{
6049+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6050+ : "+A" (i), "+c" (v)
6051+ : : "memory"
6052+ );
6053+ return i;
6054+}
6055+
6056 /*
6057 * Other variants with different arithmetic operators:
6058 */
6059@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6060 return a;
6061 }
6062
6063+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6064+{
6065+ long long a;
6066+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6067+ : "=A" (a)
6068+ : "S" (v)
6069+ : "memory", "ecx"
6070+ );
6071+ return a;
6072+}
6073+
6074 static inline long long atomic64_dec_return(atomic64_t *v)
6075 {
6076 long long a;
6077@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6078 }
6079
6080 /**
6081+ * atomic64_add_unchecked - add integer to atomic64 variable
6082+ * @i: integer value to add
6083+ * @v: pointer to type atomic64_unchecked_t
6084+ *
6085+ * Atomically adds @i to @v.
6086+ */
6087+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6088+{
6089+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6090+ : "+A" (i), "+c" (v)
6091+ : : "memory"
6092+ );
6093+ return i;
6094+}
6095+
6096+/**
6097 * atomic64_sub - subtract the atomic64 variable
6098 * @i: integer value to subtract
6099 * @v: pointer to type atomic64_t
6100diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6101--- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6102+++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6103@@ -18,7 +18,19 @@
6104 */
6105 static inline long atomic64_read(const atomic64_t *v)
6106 {
6107- return (*(volatile long *)&(v)->counter);
6108+ return (*(volatile const long *)&(v)->counter);
6109+}
6110+
6111+/**
6112+ * atomic64_read_unchecked - read atomic64 variable
6113+ * @v: pointer of type atomic64_unchecked_t
6114+ *
6115+ * Atomically reads the value of @v.
6116+ * Doesn't imply a read memory barrier.
6117+ */
6118+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6119+{
6120+ return (*(volatile const long *)&(v)->counter);
6121 }
6122
6123 /**
6124@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6125 }
6126
6127 /**
6128+ * atomic64_set_unchecked - set atomic64 variable
6129+ * @v: pointer to type atomic64_unchecked_t
6130+ * @i: required value
6131+ *
6132+ * Atomically sets the value of @v to @i.
6133+ */
6134+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6135+{
6136+ v->counter = i;
6137+}
6138+
6139+/**
6140 * atomic64_add - add integer to atomic64 variable
6141 * @i: integer value to add
6142 * @v: pointer to type atomic64_t
6143@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6144 */
6145 static inline void atomic64_add(long i, atomic64_t *v)
6146 {
6147+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6148+
6149+#ifdef CONFIG_PAX_REFCOUNT
6150+ "jno 0f\n"
6151+ LOCK_PREFIX "subq %1,%0\n"
6152+ "int $4\n0:\n"
6153+ _ASM_EXTABLE(0b, 0b)
6154+#endif
6155+
6156+ : "=m" (v->counter)
6157+ : "er" (i), "m" (v->counter));
6158+}
6159+
6160+/**
6161+ * atomic64_add_unchecked - add integer to atomic64 variable
6162+ * @i: integer value to add
6163+ * @v: pointer to type atomic64_unchecked_t
6164+ *
6165+ * Atomically adds @i to @v.
6166+ */
6167+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6168+{
6169 asm volatile(LOCK_PREFIX "addq %1,%0"
6170 : "=m" (v->counter)
6171 : "er" (i), "m" (v->counter));
6172@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6173 */
6174 static inline void atomic64_sub(long i, atomic64_t *v)
6175 {
6176- asm volatile(LOCK_PREFIX "subq %1,%0"
6177+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6178+
6179+#ifdef CONFIG_PAX_REFCOUNT
6180+ "jno 0f\n"
6181+ LOCK_PREFIX "addq %1,%0\n"
6182+ "int $4\n0:\n"
6183+ _ASM_EXTABLE(0b, 0b)
6184+#endif
6185+
6186+ : "=m" (v->counter)
6187+ : "er" (i), "m" (v->counter));
6188+}
6189+
6190+/**
6191+ * atomic64_sub_unchecked - subtract the atomic64 variable
6192+ * @i: integer value to subtract
6193+ * @v: pointer to type atomic64_unchecked_t
6194+ *
6195+ * Atomically subtracts @i from @v.
6196+ */
6197+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6198+{
6199+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6200 : "=m" (v->counter)
6201 : "er" (i), "m" (v->counter));
6202 }
6203@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6204 {
6205 unsigned char c;
6206
6207- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6208+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6209+
6210+#ifdef CONFIG_PAX_REFCOUNT
6211+ "jno 0f\n"
6212+ LOCK_PREFIX "addq %2,%0\n"
6213+ "int $4\n0:\n"
6214+ _ASM_EXTABLE(0b, 0b)
6215+#endif
6216+
6217+ "sete %1\n"
6218 : "=m" (v->counter), "=qm" (c)
6219 : "er" (i), "m" (v->counter) : "memory");
6220 return c;
6221@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6222 */
6223 static inline void atomic64_inc(atomic64_t *v)
6224 {
6225+ asm volatile(LOCK_PREFIX "incq %0\n"
6226+
6227+#ifdef CONFIG_PAX_REFCOUNT
6228+ "jno 0f\n"
6229+ LOCK_PREFIX "decq %0\n"
6230+ "int $4\n0:\n"
6231+ _ASM_EXTABLE(0b, 0b)
6232+#endif
6233+
6234+ : "=m" (v->counter)
6235+ : "m" (v->counter));
6236+}
6237+
6238+/**
6239+ * atomic64_inc_unchecked - increment atomic64 variable
6240+ * @v: pointer to type atomic64_unchecked_t
6241+ *
6242+ * Atomically increments @v by 1.
6243+ */
6244+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6245+{
6246 asm volatile(LOCK_PREFIX "incq %0"
6247 : "=m" (v->counter)
6248 : "m" (v->counter));
6249@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6250 */
6251 static inline void atomic64_dec(atomic64_t *v)
6252 {
6253- asm volatile(LOCK_PREFIX "decq %0"
6254+ asm volatile(LOCK_PREFIX "decq %0\n"
6255+
6256+#ifdef CONFIG_PAX_REFCOUNT
6257+ "jno 0f\n"
6258+ LOCK_PREFIX "incq %0\n"
6259+ "int $4\n0:\n"
6260+ _ASM_EXTABLE(0b, 0b)
6261+#endif
6262+
6263+ : "=m" (v->counter)
6264+ : "m" (v->counter));
6265+}
6266+
6267+/**
6268+ * atomic64_dec_unchecked - decrement atomic64 variable
6269+ * @v: pointer to type atomic64_t
6270+ *
6271+ * Atomically decrements @v by 1.
6272+ */
6273+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6274+{
6275+ asm volatile(LOCK_PREFIX "decq %0\n"
6276 : "=m" (v->counter)
6277 : "m" (v->counter));
6278 }
6279@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6280 {
6281 unsigned char c;
6282
6283- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6284+ asm volatile(LOCK_PREFIX "decq %0\n"
6285+
6286+#ifdef CONFIG_PAX_REFCOUNT
6287+ "jno 0f\n"
6288+ LOCK_PREFIX "incq %0\n"
6289+ "int $4\n0:\n"
6290+ _ASM_EXTABLE(0b, 0b)
6291+#endif
6292+
6293+ "sete %1\n"
6294 : "=m" (v->counter), "=qm" (c)
6295 : "m" (v->counter) : "memory");
6296 return c != 0;
6297@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6298 {
6299 unsigned char c;
6300
6301- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6302+ asm volatile(LOCK_PREFIX "incq %0\n"
6303+
6304+#ifdef CONFIG_PAX_REFCOUNT
6305+ "jno 0f\n"
6306+ LOCK_PREFIX "decq %0\n"
6307+ "int $4\n0:\n"
6308+ _ASM_EXTABLE(0b, 0b)
6309+#endif
6310+
6311+ "sete %1\n"
6312 : "=m" (v->counter), "=qm" (c)
6313 : "m" (v->counter) : "memory");
6314 return c != 0;
6315@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6316 {
6317 unsigned char c;
6318
6319- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6320+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6321+
6322+#ifdef CONFIG_PAX_REFCOUNT
6323+ "jno 0f\n"
6324+ LOCK_PREFIX "subq %2,%0\n"
6325+ "int $4\n0:\n"
6326+ _ASM_EXTABLE(0b, 0b)
6327+#endif
6328+
6329+ "sets %1\n"
6330 : "=m" (v->counter), "=qm" (c)
6331 : "er" (i), "m" (v->counter) : "memory");
6332 return c;
6333@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6334 static inline long atomic64_add_return(long i, atomic64_t *v)
6335 {
6336 long __i = i;
6337- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6338+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6339+
6340+#ifdef CONFIG_PAX_REFCOUNT
6341+ "jno 0f\n"
6342+ "movq %0, %1\n"
6343+ "int $4\n0:\n"
6344+ _ASM_EXTABLE(0b, 0b)
6345+#endif
6346+
6347+ : "+r" (i), "+m" (v->counter)
6348+ : : "memory");
6349+ return i + __i;
6350+}
6351+
6352+/**
6353+ * atomic64_add_return_unchecked - add and return
6354+ * @i: integer value to add
6355+ * @v: pointer to type atomic64_unchecked_t
6356+ *
6357+ * Atomically adds @i to @v and returns @i + @v
6358+ */
6359+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6360+{
6361+ long __i = i;
6362+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6363 : "+r" (i), "+m" (v->counter)
6364 : : "memory");
6365 return i + __i;
6366@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6367 }
6368
6369 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6370+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6371+{
6372+ return atomic64_add_return_unchecked(1, v);
6373+}
6374 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6375
6376 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6377@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6378 return cmpxchg(&v->counter, old, new);
6379 }
6380
6381+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6382+{
6383+ return cmpxchg(&v->counter, old, new);
6384+}
6385+
6386 static inline long atomic64_xchg(atomic64_t *v, long new)
6387 {
6388 return xchg(&v->counter, new);
6389@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6390 */
6391 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6392 {
6393- long c, old;
6394+ long c, old, new;
6395 c = atomic64_read(v);
6396 for (;;) {
6397- if (unlikely(c == (u)))
6398+ if (unlikely(c == u))
6399 break;
6400- old = atomic64_cmpxchg((v), c, c + (a));
6401+
6402+ asm volatile("add %2,%0\n"
6403+
6404+#ifdef CONFIG_PAX_REFCOUNT
6405+ "jno 0f\n"
6406+ "sub %2,%0\n"
6407+ "int $4\n0:\n"
6408+ _ASM_EXTABLE(0b, 0b)
6409+#endif
6410+
6411+ : "=r" (new)
6412+ : "0" (c), "ir" (a));
6413+
6414+ old = atomic64_cmpxchg(v, c, new);
6415 if (likely(old == c))
6416 break;
6417 c = old;
6418 }
6419- return c != (u);
6420+ return c != u;
6421 }
6422
6423 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6424diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6425--- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6426+++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6427@@ -22,7 +22,18 @@
6428 */
6429 static inline int atomic_read(const atomic_t *v)
6430 {
6431- return (*(volatile int *)&(v)->counter);
6432+ return (*(volatile const int *)&(v)->counter);
6433+}
6434+
6435+/**
6436+ * atomic_read_unchecked - read atomic variable
6437+ * @v: pointer of type atomic_unchecked_t
6438+ *
6439+ * Atomically reads the value of @v.
6440+ */
6441+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6442+{
6443+ return (*(volatile const int *)&(v)->counter);
6444 }
6445
6446 /**
6447@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6448 }
6449
6450 /**
6451+ * atomic_set_unchecked - set atomic variable
6452+ * @v: pointer of type atomic_unchecked_t
6453+ * @i: required value
6454+ *
6455+ * Atomically sets the value of @v to @i.
6456+ */
6457+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6458+{
6459+ v->counter = i;
6460+}
6461+
6462+/**
6463 * atomic_add - add integer to atomic variable
6464 * @i: integer value to add
6465 * @v: pointer of type atomic_t
6466@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6467 */
6468 static inline void atomic_add(int i, atomic_t *v)
6469 {
6470- asm volatile(LOCK_PREFIX "addl %1,%0"
6471+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6472+
6473+#ifdef CONFIG_PAX_REFCOUNT
6474+ "jno 0f\n"
6475+ LOCK_PREFIX "subl %1,%0\n"
6476+ "int $4\n0:\n"
6477+ _ASM_EXTABLE(0b, 0b)
6478+#endif
6479+
6480+ : "+m" (v->counter)
6481+ : "ir" (i));
6482+}
6483+
6484+/**
6485+ * atomic_add_unchecked - add integer to atomic variable
6486+ * @i: integer value to add
6487+ * @v: pointer of type atomic_unchecked_t
6488+ *
6489+ * Atomically adds @i to @v.
6490+ */
6491+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6492+{
6493+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6494 : "+m" (v->counter)
6495 : "ir" (i));
6496 }
6497@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6498 */
6499 static inline void atomic_sub(int i, atomic_t *v)
6500 {
6501- asm volatile(LOCK_PREFIX "subl %1,%0"
6502+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6503+
6504+#ifdef CONFIG_PAX_REFCOUNT
6505+ "jno 0f\n"
6506+ LOCK_PREFIX "addl %1,%0\n"
6507+ "int $4\n0:\n"
6508+ _ASM_EXTABLE(0b, 0b)
6509+#endif
6510+
6511+ : "+m" (v->counter)
6512+ : "ir" (i));
6513+}
6514+
6515+/**
6516+ * atomic_sub_unchecked - subtract integer from atomic variable
6517+ * @i: integer value to subtract
6518+ * @v: pointer of type atomic_unchecked_t
6519+ *
6520+ * Atomically subtracts @i from @v.
6521+ */
6522+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6523+{
6524+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6525 : "+m" (v->counter)
6526 : "ir" (i));
6527 }
6528@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6529 {
6530 unsigned char c;
6531
6532- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6533+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6534+
6535+#ifdef CONFIG_PAX_REFCOUNT
6536+ "jno 0f\n"
6537+ LOCK_PREFIX "addl %2,%0\n"
6538+ "int $4\n0:\n"
6539+ _ASM_EXTABLE(0b, 0b)
6540+#endif
6541+
6542+ "sete %1\n"
6543 : "+m" (v->counter), "=qm" (c)
6544 : "ir" (i) : "memory");
6545 return c;
6546@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6547 */
6548 static inline void atomic_inc(atomic_t *v)
6549 {
6550- asm volatile(LOCK_PREFIX "incl %0"
6551+ asm volatile(LOCK_PREFIX "incl %0\n"
6552+
6553+#ifdef CONFIG_PAX_REFCOUNT
6554+ "jno 0f\n"
6555+ LOCK_PREFIX "decl %0\n"
6556+ "int $4\n0:\n"
6557+ _ASM_EXTABLE(0b, 0b)
6558+#endif
6559+
6560+ : "+m" (v->counter));
6561+}
6562+
6563+/**
6564+ * atomic_inc_unchecked - increment atomic variable
6565+ * @v: pointer of type atomic_unchecked_t
6566+ *
6567+ * Atomically increments @v by 1.
6568+ */
6569+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6570+{
6571+ asm volatile(LOCK_PREFIX "incl %0\n"
6572 : "+m" (v->counter));
6573 }
6574
6575@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6576 */
6577 static inline void atomic_dec(atomic_t *v)
6578 {
6579- asm volatile(LOCK_PREFIX "decl %0"
6580+ asm volatile(LOCK_PREFIX "decl %0\n"
6581+
6582+#ifdef CONFIG_PAX_REFCOUNT
6583+ "jno 0f\n"
6584+ LOCK_PREFIX "incl %0\n"
6585+ "int $4\n0:\n"
6586+ _ASM_EXTABLE(0b, 0b)
6587+#endif
6588+
6589+ : "+m" (v->counter));
6590+}
6591+
6592+/**
6593+ * atomic_dec_unchecked - decrement atomic variable
6594+ * @v: pointer of type atomic_unchecked_t
6595+ *
6596+ * Atomically decrements @v by 1.
6597+ */
6598+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6599+{
6600+ asm volatile(LOCK_PREFIX "decl %0\n"
6601 : "+m" (v->counter));
6602 }
6603
6604@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6605 {
6606 unsigned char c;
6607
6608- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6609+ asm volatile(LOCK_PREFIX "decl %0\n"
6610+
6611+#ifdef CONFIG_PAX_REFCOUNT
6612+ "jno 0f\n"
6613+ LOCK_PREFIX "incl %0\n"
6614+ "int $4\n0:\n"
6615+ _ASM_EXTABLE(0b, 0b)
6616+#endif
6617+
6618+ "sete %1\n"
6619 : "+m" (v->counter), "=qm" (c)
6620 : : "memory");
6621 return c != 0;
6622@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6623 {
6624 unsigned char c;
6625
6626- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6627+ asm volatile(LOCK_PREFIX "incl %0\n"
6628+
6629+#ifdef CONFIG_PAX_REFCOUNT
6630+ "jno 0f\n"
6631+ LOCK_PREFIX "decl %0\n"
6632+ "int $4\n0:\n"
6633+ _ASM_EXTABLE(0b, 0b)
6634+#endif
6635+
6636+ "sete %1\n"
6637+ : "+m" (v->counter), "=qm" (c)
6638+ : : "memory");
6639+ return c != 0;
6640+}
6641+
6642+/**
6643+ * atomic_inc_and_test_unchecked - increment and test
6644+ * @v: pointer of type atomic_unchecked_t
6645+ *
6646+ * Atomically increments @v by 1
6647+ * and returns true if the result is zero, or false for all
6648+ * other cases.
6649+ */
6650+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6651+{
6652+ unsigned char c;
6653+
6654+ asm volatile(LOCK_PREFIX "incl %0\n"
6655+ "sete %1\n"
6656 : "+m" (v->counter), "=qm" (c)
6657 : : "memory");
6658 return c != 0;
6659@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6660 {
6661 unsigned char c;
6662
6663- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6664+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6665+
6666+#ifdef CONFIG_PAX_REFCOUNT
6667+ "jno 0f\n"
6668+ LOCK_PREFIX "subl %2,%0\n"
6669+ "int $4\n0:\n"
6670+ _ASM_EXTABLE(0b, 0b)
6671+#endif
6672+
6673+ "sets %1\n"
6674 : "+m" (v->counter), "=qm" (c)
6675 : "ir" (i) : "memory");
6676 return c;
6677@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6678 #endif
6679 /* Modern 486+ processor */
6680 __i = i;
6681+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6682+
6683+#ifdef CONFIG_PAX_REFCOUNT
6684+ "jno 0f\n"
6685+ "movl %0, %1\n"
6686+ "int $4\n0:\n"
6687+ _ASM_EXTABLE(0b, 0b)
6688+#endif
6689+
6690+ : "+r" (i), "+m" (v->counter)
6691+ : : "memory");
6692+ return i + __i;
6693+
6694+#ifdef CONFIG_M386
6695+no_xadd: /* Legacy 386 processor */
6696+ local_irq_save(flags);
6697+ __i = atomic_read(v);
6698+ atomic_set(v, i + __i);
6699+ local_irq_restore(flags);
6700+ return i + __i;
6701+#endif
6702+}
6703+
6704+/**
6705+ * atomic_add_return_unchecked - add integer and return
6706+ * @v: pointer of type atomic_unchecked_t
6707+ * @i: integer value to add
6708+ *
6709+ * Atomically adds @i to @v and returns @i + @v
6710+ */
6711+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6712+{
6713+ int __i;
6714+#ifdef CONFIG_M386
6715+ unsigned long flags;
6716+ if (unlikely(boot_cpu_data.x86 <= 3))
6717+ goto no_xadd;
6718+#endif
6719+ /* Modern 486+ processor */
6720+ __i = i;
6721 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6722 : "+r" (i), "+m" (v->counter)
6723 : : "memory");
6724@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6725 }
6726
6727 #define atomic_inc_return(v) (atomic_add_return(1, v))
6728+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6729+{
6730+ return atomic_add_return_unchecked(1, v);
6731+}
6732 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6733
6734 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6735@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6736 return cmpxchg(&v->counter, old, new);
6737 }
6738
6739+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6740+{
6741+ return cmpxchg(&v->counter, old, new);
6742+}
6743+
6744 static inline int atomic_xchg(atomic_t *v, int new)
6745 {
6746 return xchg(&v->counter, new);
6747 }
6748
6749+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6750+{
6751+ return xchg(&v->counter, new);
6752+}
6753+
6754 /**
6755 * atomic_add_unless - add unless the number is already a given value
6756 * @v: pointer of type atomic_t
6757@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6758 */
6759 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6760 {
6761- int c, old;
6762+ int c, old, new;
6763 c = atomic_read(v);
6764 for (;;) {
6765- if (unlikely(c == (u)))
6766+ if (unlikely(c == u))
6767 break;
6768- old = atomic_cmpxchg((v), c, c + (a));
6769+
6770+ asm volatile("addl %2,%0\n"
6771+
6772+#ifdef CONFIG_PAX_REFCOUNT
6773+ "jno 0f\n"
6774+ "subl %2,%0\n"
6775+ "int $4\n0:\n"
6776+ _ASM_EXTABLE(0b, 0b)
6777+#endif
6778+
6779+ : "=r" (new)
6780+ : "0" (c), "ir" (a));
6781+
6782+ old = atomic_cmpxchg(v, c, new);
6783 if (likely(old == c))
6784 break;
6785 c = old;
6786 }
6787- return c != (u);
6788+ return c != u;
6789 }
6790
6791 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6792
6793+/**
6794+ * atomic_inc_not_zero_hint - increment if not null
6795+ * @v: pointer of type atomic_t
6796+ * @hint: probable value of the atomic before the increment
6797+ *
6798+ * This version of atomic_inc_not_zero() gives a hint of probable
6799+ * value of the atomic. This helps processor to not read the memory
6800+ * before doing the atomic read/modify/write cycle, lowering
6801+ * number of bus transactions on some arches.
6802+ *
6803+ * Returns: 0 if increment was not done, 1 otherwise.
6804+ */
6805+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6806+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6807+{
6808+ int val, c = hint, new;
6809+
6810+ /* sanity test, should be removed by compiler if hint is a constant */
6811+ if (!hint)
6812+ return atomic_inc_not_zero(v);
6813+
6814+ do {
6815+ asm volatile("incl %0\n"
6816+
6817+#ifdef CONFIG_PAX_REFCOUNT
6818+ "jno 0f\n"
6819+ "decl %0\n"
6820+ "int $4\n0:\n"
6821+ _ASM_EXTABLE(0b, 0b)
6822+#endif
6823+
6824+ : "=r" (new)
6825+ : "0" (c));
6826+
6827+ val = atomic_cmpxchg(v, c, new);
6828+ if (val == c)
6829+ return 1;
6830+ c = val;
6831+ } while (c);
6832+
6833+ return 0;
6834+}
6835+
6836 /*
6837 * atomic_dec_if_positive - decrement by 1 if old value positive
6838 * @v: pointer of type atomic_t
6839diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6840--- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6841+++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6842@@ -38,7 +38,7 @@
6843 * a mask operation on a byte.
6844 */
6845 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6846-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6847+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6848 #define CONST_MASK(nr) (1 << ((nr) & 7))
6849
6850 /**
6851diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6852--- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6853+++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6854@@ -11,10 +11,15 @@
6855 #include <asm/pgtable_types.h>
6856
6857 /* Physical address where kernel should be loaded. */
6858-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6859+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6860 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6861 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6862
6863+#ifndef __ASSEMBLY__
6864+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6865+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6866+#endif
6867+
6868 /* Minimum kernel alignment, as a power of two */
6869 #ifdef CONFIG_X86_64
6870 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6871diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6872--- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6873+++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6874@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6875 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6876
6877 if (pg_flags == _PGMT_DEFAULT)
6878- return -1;
6879+ return ~0UL;
6880 else if (pg_flags == _PGMT_WC)
6881 return _PAGE_CACHE_WC;
6882 else if (pg_flags == _PGMT_UC_MINUS)
6883diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6884--- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6885+++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6886@@ -5,12 +5,13 @@
6887
6888 /* L1 cache line size */
6889 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6890-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6891+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6892
6893 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6894+#define __read_only __attribute__((__section__(".data..read_only")))
6895
6896 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6897-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6898+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6899
6900 #ifdef CONFIG_X86_VSMP
6901 #ifdef CONFIG_SMP
6902diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6903--- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6904+++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6905@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6906 int len, __wsum sum,
6907 int *src_err_ptr, int *dst_err_ptr);
6908
6909+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6910+ int len, __wsum sum,
6911+ int *src_err_ptr, int *dst_err_ptr);
6912+
6913+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6914+ int len, __wsum sum,
6915+ int *src_err_ptr, int *dst_err_ptr);
6916+
6917 /*
6918 * Note: when you get a NULL pointer exception here this means someone
6919 * passed in an incorrect kernel address to one of these functions.
6920@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6921 int *err_ptr)
6922 {
6923 might_sleep();
6924- return csum_partial_copy_generic((__force void *)src, dst,
6925+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6926 len, sum, err_ptr, NULL);
6927 }
6928
6929@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6930 {
6931 might_sleep();
6932 if (access_ok(VERIFY_WRITE, dst, len))
6933- return csum_partial_copy_generic(src, (__force void *)dst,
6934+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6935 len, sum, NULL, err_ptr);
6936
6937 if (len)
6938diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6939--- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6940+++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6941@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6942 ".section .discard,\"aw\",@progbits\n"
6943 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6944 ".previous\n"
6945- ".section .altinstr_replacement,\"ax\"\n"
6946+ ".section .altinstr_replacement,\"a\"\n"
6947 "3: movb $1,%0\n"
6948 "4:\n"
6949 ".previous\n"
6950diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6951--- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6952+++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6953@@ -31,6 +31,12 @@ struct desc_struct {
6954 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6955 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6956 };
6957+ struct {
6958+ u16 offset_low;
6959+ u16 seg;
6960+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6961+ unsigned offset_high: 16;
6962+ } gate;
6963 };
6964 } __attribute__((packed));
6965
6966diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6967--- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6968+++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6969@@ -4,6 +4,7 @@
6970 #include <asm/desc_defs.h>
6971 #include <asm/ldt.h>
6972 #include <asm/mmu.h>
6973+#include <asm/pgtable.h>
6974
6975 #include <linux/smp.h>
6976
6977@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6978
6979 desc->type = (info->read_exec_only ^ 1) << 1;
6980 desc->type |= info->contents << 2;
6981+ desc->type |= info->seg_not_present ^ 1;
6982
6983 desc->s = 1;
6984 desc->dpl = 0x3;
6985@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6986 }
6987
6988 extern struct desc_ptr idt_descr;
6989-extern gate_desc idt_table[];
6990-
6991-struct gdt_page {
6992- struct desc_struct gdt[GDT_ENTRIES];
6993-} __attribute__((aligned(PAGE_SIZE)));
6994-
6995-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6996+extern gate_desc idt_table[256];
6997
6998+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6999 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7000 {
7001- return per_cpu(gdt_page, cpu).gdt;
7002+ return cpu_gdt_table[cpu];
7003 }
7004
7005 #ifdef CONFIG_X86_64
7006@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7007 unsigned long base, unsigned dpl, unsigned flags,
7008 unsigned short seg)
7009 {
7010- gate->a = (seg << 16) | (base & 0xffff);
7011- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7012+ gate->gate.offset_low = base;
7013+ gate->gate.seg = seg;
7014+ gate->gate.reserved = 0;
7015+ gate->gate.type = type;
7016+ gate->gate.s = 0;
7017+ gate->gate.dpl = dpl;
7018+ gate->gate.p = 1;
7019+ gate->gate.offset_high = base >> 16;
7020 }
7021
7022 #endif
7023@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7024
7025 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7026 {
7027+ pax_open_kernel();
7028 memcpy(&idt[entry], gate, sizeof(*gate));
7029+ pax_close_kernel();
7030 }
7031
7032 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7033 {
7034+ pax_open_kernel();
7035 memcpy(&ldt[entry], desc, 8);
7036+ pax_close_kernel();
7037 }
7038
7039 static inline void
7040@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7041 default: size = sizeof(*gdt); break;
7042 }
7043
7044+ pax_open_kernel();
7045 memcpy(&gdt[entry], desc, size);
7046+ pax_close_kernel();
7047 }
7048
7049 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7050@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7051
7052 static inline void native_load_tr_desc(void)
7053 {
7054+ pax_open_kernel();
7055 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7056+ pax_close_kernel();
7057 }
7058
7059 static inline void native_load_gdt(const struct desc_ptr *dtr)
7060@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7061 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7062 unsigned int i;
7063
7064+ pax_open_kernel();
7065 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7066 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7067+ pax_close_kernel();
7068 }
7069
7070 #define _LDT_empty(info) \
7071@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7072 desc->limit = (limit >> 16) & 0xf;
7073 }
7074
7075-static inline void _set_gate(int gate, unsigned type, void *addr,
7076+static inline void _set_gate(int gate, unsigned type, const void *addr,
7077 unsigned dpl, unsigned ist, unsigned seg)
7078 {
7079 gate_desc s;
7080@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7081 * Pentium F0 0F bugfix can have resulted in the mapped
7082 * IDT being write-protected.
7083 */
7084-static inline void set_intr_gate(unsigned int n, void *addr)
7085+static inline void set_intr_gate(unsigned int n, const void *addr)
7086 {
7087 BUG_ON((unsigned)n > 0xFF);
7088 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7089@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7090 /*
7091 * This routine sets up an interrupt gate at directory privilege level 3.
7092 */
7093-static inline void set_system_intr_gate(unsigned int n, void *addr)
7094+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7095 {
7096 BUG_ON((unsigned)n > 0xFF);
7097 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7098 }
7099
7100-static inline void set_system_trap_gate(unsigned int n, void *addr)
7101+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7102 {
7103 BUG_ON((unsigned)n > 0xFF);
7104 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7105 }
7106
7107-static inline void set_trap_gate(unsigned int n, void *addr)
7108+static inline void set_trap_gate(unsigned int n, const void *addr)
7109 {
7110 BUG_ON((unsigned)n > 0xFF);
7111 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7112@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7113 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7114 {
7115 BUG_ON((unsigned)n > 0xFF);
7116- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7117+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7118 }
7119
7120-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7121+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7122 {
7123 BUG_ON((unsigned)n > 0xFF);
7124 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7125 }
7126
7127-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7128+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7129 {
7130 BUG_ON((unsigned)n > 0xFF);
7131 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7132 }
7133
7134+#ifdef CONFIG_X86_32
7135+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7136+{
7137+ struct desc_struct d;
7138+
7139+ if (likely(limit))
7140+ limit = (limit - 1UL) >> PAGE_SHIFT;
7141+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7142+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7143+}
7144+#endif
7145+
7146 #endif /* _ASM_X86_DESC_H */
7147diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7148--- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7149+++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7150@@ -69,7 +69,7 @@ struct e820map {
7151 #define ISA_START_ADDRESS 0xa0000
7152 #define ISA_END_ADDRESS 0x100000
7153
7154-#define BIOS_BEGIN 0x000a0000
7155+#define BIOS_BEGIN 0x000c0000
7156 #define BIOS_END 0x00100000
7157
7158 #define BIOS_ROM_BASE 0xffe00000
7159diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7160--- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7161+++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7162@@ -237,7 +237,25 @@ extern int force_personality32;
7163 the loader. We need to make sure that it is out of the way of the program
7164 that it will "exec", and that there is sufficient room for the brk. */
7165
7166+#ifdef CONFIG_PAX_SEGMEXEC
7167+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7168+#else
7169 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7170+#endif
7171+
7172+#ifdef CONFIG_PAX_ASLR
7173+#ifdef CONFIG_X86_32
7174+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7175+
7176+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7177+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7178+#else
7179+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7180+
7181+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7182+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7183+#endif
7184+#endif
7185
7186 /* This yields a mask that user programs can use to figure out what
7187 instruction set this CPU supports. This could be done in user space,
7188@@ -290,9 +308,7 @@ do { \
7189
7190 #define ARCH_DLINFO \
7191 do { \
7192- if (vdso_enabled) \
7193- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7194- (unsigned long)current->mm->context.vdso); \
7195+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7196 } while (0)
7197
7198 #define AT_SYSINFO 32
7199@@ -303,7 +319,7 @@ do { \
7200
7201 #endif /* !CONFIG_X86_32 */
7202
7203-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7204+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7205
7206 #define VDSO_ENTRY \
7207 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7208@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7209 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7210 #define compat_arch_setup_additional_pages syscall32_setup_pages
7211
7212-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7213-#define arch_randomize_brk arch_randomize_brk
7214-
7215 #endif /* _ASM_X86_ELF_H */
7216diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7217--- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7218+++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7219@@ -15,6 +15,6 @@ enum reboot_type {
7220
7221 extern enum reboot_type reboot_type;
7222
7223-extern void machine_emergency_restart(void);
7224+extern void machine_emergency_restart(void) __noreturn;
7225
7226 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7227diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7228--- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7229+++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7230@@ -12,16 +12,18 @@
7231 #include <asm/system.h>
7232
7233 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7234+ typecheck(u32 *, uaddr); \
7235 asm volatile("1:\t" insn "\n" \
7236 "2:\t.section .fixup,\"ax\"\n" \
7237 "3:\tmov\t%3, %1\n" \
7238 "\tjmp\t2b\n" \
7239 "\t.previous\n" \
7240 _ASM_EXTABLE(1b, 3b) \
7241- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7242+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7243 : "i" (-EFAULT), "0" (oparg), "1" (0))
7244
7245 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7246+ typecheck(u32 *, uaddr); \
7247 asm volatile("1:\tmovl %2, %0\n" \
7248 "\tmovl\t%0, %3\n" \
7249 "\t" insn "\n" \
7250@@ -34,7 +36,7 @@
7251 _ASM_EXTABLE(1b, 4b) \
7252 _ASM_EXTABLE(2b, 4b) \
7253 : "=&a" (oldval), "=&r" (ret), \
7254- "+m" (*uaddr), "=&r" (tem) \
7255+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7256 : "r" (oparg), "i" (-EFAULT), "1" (0))
7257
7258 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7259@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7260
7261 switch (op) {
7262 case FUTEX_OP_SET:
7263- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7264+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7265 break;
7266 case FUTEX_OP_ADD:
7267- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7268+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7269 uaddr, oparg);
7270 break;
7271 case FUTEX_OP_OR:
7272@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7273 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7274 return -EFAULT;
7275
7276- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7277+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7278 "2:\t.section .fixup, \"ax\"\n"
7279 "3:\tmov %3, %0\n"
7280 "\tjmp 2b\n"
7281 "\t.previous\n"
7282 _ASM_EXTABLE(1b, 3b)
7283- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7284+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7285 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7286 : "memory"
7287 );
7288diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7289--- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7290+++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7291@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7292 extern void enable_IO_APIC(void);
7293
7294 /* Statistics */
7295-extern atomic_t irq_err_count;
7296-extern atomic_t irq_mis_count;
7297+extern atomic_unchecked_t irq_err_count;
7298+extern atomic_unchecked_t irq_mis_count;
7299
7300 /* EISA */
7301 extern void eisa_set_level_irq(unsigned int irq);
7302diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7303--- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7304+++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7305@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7306 {
7307 int err;
7308
7309+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7310+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7311+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7312+#endif
7313+
7314 /* See comment in fxsave() below. */
7315 #ifdef CONFIG_AS_FXSAVEQ
7316 asm volatile("1: fxrstorq %[fx]\n\t"
7317@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7318 {
7319 int err;
7320
7321+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7322+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7323+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7324+#endif
7325+
7326 /*
7327 * Clear the bytes not touched by the fxsave and reserved
7328 * for the SW usage.
7329@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7330 #endif /* CONFIG_X86_64 */
7331
7332 /* We need a safe address that is cheap to find and that is already
7333- in L1 during context switch. The best choices are unfortunately
7334- different for UP and SMP */
7335-#ifdef CONFIG_SMP
7336-#define safe_address (__per_cpu_offset[0])
7337-#else
7338-#define safe_address (kstat_cpu(0).cpustat.user)
7339-#endif
7340+ in L1 during context switch. */
7341+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7342
7343 /*
7344 * These must be called with preempt disabled
7345@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7346 struct thread_info *me = current_thread_info();
7347 preempt_disable();
7348 if (me->status & TS_USEDFPU)
7349- __save_init_fpu(me->task);
7350+ __save_init_fpu(current);
7351 else
7352 clts();
7353 }
7354diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7355--- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7356+++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7357@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7358
7359 #include <linux/vmalloc.h>
7360
7361+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7362+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7363+{
7364+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7365+}
7366+
7367+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7368+{
7369+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7370+}
7371+
7372 /*
7373 * Convert a virtual cached pointer to an uncached pointer
7374 */
7375diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7376--- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7377+++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7378@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7379 sti; \
7380 sysexit
7381
7382+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7383+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7384+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7385+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7386+
7387 #else
7388 #define INTERRUPT_RETURN iret
7389 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7390diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7391--- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7392+++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7393@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7394 #define RELATIVEJUMP_SIZE 5
7395 #define RELATIVECALL_OPCODE 0xe8
7396 #define RELATIVE_ADDR_SIZE 4
7397-#define MAX_STACK_SIZE 64
7398-#define MIN_STACK_SIZE(ADDR) \
7399- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7400- THREAD_SIZE - (unsigned long)(ADDR))) \
7401- ? (MAX_STACK_SIZE) \
7402- : (((unsigned long)current_thread_info()) + \
7403- THREAD_SIZE - (unsigned long)(ADDR)))
7404+#define MAX_STACK_SIZE 64UL
7405+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7406
7407 #define flush_insn_slot(p) do { } while (0)
7408
7409diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7410--- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7411+++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7412@@ -441,7 +441,7 @@ struct kvm_arch {
7413 unsigned int n_used_mmu_pages;
7414 unsigned int n_requested_mmu_pages;
7415 unsigned int n_max_mmu_pages;
7416- atomic_t invlpg_counter;
7417+ atomic_unchecked_t invlpg_counter;
7418 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7419 /*
7420 * Hash table of struct kvm_mmu_page.
7421@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7422 enum x86_intercept_stage stage);
7423
7424 const struct trace_print_flags *exit_reasons_str;
7425-};
7426+} __do_const;
7427
7428 struct kvm_arch_async_pf {
7429 u32 token;
7430diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7431--- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7432+++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7433@@ -18,26 +18,58 @@ typedef struct {
7434
7435 static inline void local_inc(local_t *l)
7436 {
7437- asm volatile(_ASM_INC "%0"
7438+ asm volatile(_ASM_INC "%0\n"
7439+
7440+#ifdef CONFIG_PAX_REFCOUNT
7441+ "jno 0f\n"
7442+ _ASM_DEC "%0\n"
7443+ "int $4\n0:\n"
7444+ _ASM_EXTABLE(0b, 0b)
7445+#endif
7446+
7447 : "+m" (l->a.counter));
7448 }
7449
7450 static inline void local_dec(local_t *l)
7451 {
7452- asm volatile(_ASM_DEC "%0"
7453+ asm volatile(_ASM_DEC "%0\n"
7454+
7455+#ifdef CONFIG_PAX_REFCOUNT
7456+ "jno 0f\n"
7457+ _ASM_INC "%0\n"
7458+ "int $4\n0:\n"
7459+ _ASM_EXTABLE(0b, 0b)
7460+#endif
7461+
7462 : "+m" (l->a.counter));
7463 }
7464
7465 static inline void local_add(long i, local_t *l)
7466 {
7467- asm volatile(_ASM_ADD "%1,%0"
7468+ asm volatile(_ASM_ADD "%1,%0\n"
7469+
7470+#ifdef CONFIG_PAX_REFCOUNT
7471+ "jno 0f\n"
7472+ _ASM_SUB "%1,%0\n"
7473+ "int $4\n0:\n"
7474+ _ASM_EXTABLE(0b, 0b)
7475+#endif
7476+
7477 : "+m" (l->a.counter)
7478 : "ir" (i));
7479 }
7480
7481 static inline void local_sub(long i, local_t *l)
7482 {
7483- asm volatile(_ASM_SUB "%1,%0"
7484+ asm volatile(_ASM_SUB "%1,%0\n"
7485+
7486+#ifdef CONFIG_PAX_REFCOUNT
7487+ "jno 0f\n"
7488+ _ASM_ADD "%1,%0\n"
7489+ "int $4\n0:\n"
7490+ _ASM_EXTABLE(0b, 0b)
7491+#endif
7492+
7493 : "+m" (l->a.counter)
7494 : "ir" (i));
7495 }
7496@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7497 {
7498 unsigned char c;
7499
7500- asm volatile(_ASM_SUB "%2,%0; sete %1"
7501+ asm volatile(_ASM_SUB "%2,%0\n"
7502+
7503+#ifdef CONFIG_PAX_REFCOUNT
7504+ "jno 0f\n"
7505+ _ASM_ADD "%2,%0\n"
7506+ "int $4\n0:\n"
7507+ _ASM_EXTABLE(0b, 0b)
7508+#endif
7509+
7510+ "sete %1\n"
7511 : "+m" (l->a.counter), "=qm" (c)
7512 : "ir" (i) : "memory");
7513 return c;
7514@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7515 {
7516 unsigned char c;
7517
7518- asm volatile(_ASM_DEC "%0; sete %1"
7519+ asm volatile(_ASM_DEC "%0\n"
7520+
7521+#ifdef CONFIG_PAX_REFCOUNT
7522+ "jno 0f\n"
7523+ _ASM_INC "%0\n"
7524+ "int $4\n0:\n"
7525+ _ASM_EXTABLE(0b, 0b)
7526+#endif
7527+
7528+ "sete %1\n"
7529 : "+m" (l->a.counter), "=qm" (c)
7530 : : "memory");
7531 return c != 0;
7532@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7533 {
7534 unsigned char c;
7535
7536- asm volatile(_ASM_INC "%0; sete %1"
7537+ asm volatile(_ASM_INC "%0\n"
7538+
7539+#ifdef CONFIG_PAX_REFCOUNT
7540+ "jno 0f\n"
7541+ _ASM_DEC "%0\n"
7542+ "int $4\n0:\n"
7543+ _ASM_EXTABLE(0b, 0b)
7544+#endif
7545+
7546+ "sete %1\n"
7547 : "+m" (l->a.counter), "=qm" (c)
7548 : : "memory");
7549 return c != 0;
7550@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7551 {
7552 unsigned char c;
7553
7554- asm volatile(_ASM_ADD "%2,%0; sets %1"
7555+ asm volatile(_ASM_ADD "%2,%0\n"
7556+
7557+#ifdef CONFIG_PAX_REFCOUNT
7558+ "jno 0f\n"
7559+ _ASM_SUB "%2,%0\n"
7560+ "int $4\n0:\n"
7561+ _ASM_EXTABLE(0b, 0b)
7562+#endif
7563+
7564+ "sets %1\n"
7565 : "+m" (l->a.counter), "=qm" (c)
7566 : "ir" (i) : "memory");
7567 return c;
7568@@ -133,7 +201,15 @@ static inline long local_add_return(long
7569 #endif
7570 /* Modern 486+ processor */
7571 __i = i;
7572- asm volatile(_ASM_XADD "%0, %1;"
7573+ asm volatile(_ASM_XADD "%0, %1\n"
7574+
7575+#ifdef CONFIG_PAX_REFCOUNT
7576+ "jno 0f\n"
7577+ _ASM_MOV "%0,%1\n"
7578+ "int $4\n0:\n"
7579+ _ASM_EXTABLE(0b, 0b)
7580+#endif
7581+
7582 : "+r" (i), "+m" (l->a.counter)
7583 : : "memory");
7584 return i + __i;
7585diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7586--- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7587+++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7588@@ -5,4 +5,14 @@
7589
7590 #include <asm-generic/mman.h>
7591
7592+#ifdef __KERNEL__
7593+#ifndef __ASSEMBLY__
7594+#ifdef CONFIG_X86_32
7595+#define arch_mmap_check i386_mmap_check
7596+int i386_mmap_check(unsigned long addr, unsigned long len,
7597+ unsigned long flags);
7598+#endif
7599+#endif
7600+#endif
7601+
7602 #endif /* _ASM_X86_MMAN_H */
7603diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7604--- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7605+++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7606@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7607
7608 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7609 {
7610+
7611+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7612+ unsigned int i;
7613+ pgd_t *pgd;
7614+
7615+ pax_open_kernel();
7616+ pgd = get_cpu_pgd(smp_processor_id());
7617+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7618+ set_pgd_batched(pgd+i, native_make_pgd(0));
7619+ pax_close_kernel();
7620+#endif
7621+
7622 #ifdef CONFIG_SMP
7623 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7624 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7625@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7626 struct task_struct *tsk)
7627 {
7628 unsigned cpu = smp_processor_id();
7629+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7630+ int tlbstate = TLBSTATE_OK;
7631+#endif
7632
7633 if (likely(prev != next)) {
7634 #ifdef CONFIG_SMP
7635+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7636+ tlbstate = percpu_read(cpu_tlbstate.state);
7637+#endif
7638 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7639 percpu_write(cpu_tlbstate.active_mm, next);
7640 #endif
7641 cpumask_set_cpu(cpu, mm_cpumask(next));
7642
7643 /* Re-load page tables */
7644+#ifdef CONFIG_PAX_PER_CPU_PGD
7645+ pax_open_kernel();
7646+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7647+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7648+ pax_close_kernel();
7649+ load_cr3(get_cpu_pgd(cpu));
7650+#else
7651 load_cr3(next->pgd);
7652+#endif
7653
7654 /* stop flush ipis for the previous mm */
7655 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7656@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7657 */
7658 if (unlikely(prev->context.ldt != next->context.ldt))
7659 load_LDT_nolock(&next->context);
7660- }
7661+
7662+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7663+ if (!(__supported_pte_mask & _PAGE_NX)) {
7664+ smp_mb__before_clear_bit();
7665+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7666+ smp_mb__after_clear_bit();
7667+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7668+ }
7669+#endif
7670+
7671+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7672+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7673+ prev->context.user_cs_limit != next->context.user_cs_limit))
7674+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7675 #ifdef CONFIG_SMP
7676+ else if (unlikely(tlbstate != TLBSTATE_OK))
7677+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7678+#endif
7679+#endif
7680+
7681+ }
7682 else {
7683+
7684+#ifdef CONFIG_PAX_PER_CPU_PGD
7685+ pax_open_kernel();
7686+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7687+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7688+ pax_close_kernel();
7689+ load_cr3(get_cpu_pgd(cpu));
7690+#endif
7691+
7692+#ifdef CONFIG_SMP
7693 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7694 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7695
7696@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7697 * tlb flush IPI delivery. We must reload CR3
7698 * to make sure to use no freed page tables.
7699 */
7700+
7701+#ifndef CONFIG_PAX_PER_CPU_PGD
7702 load_cr3(next->pgd);
7703+#endif
7704+
7705 load_LDT_nolock(&next->context);
7706+
7707+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7708+ if (!(__supported_pte_mask & _PAGE_NX))
7709+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7710+#endif
7711+
7712+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7713+#ifdef CONFIG_PAX_PAGEEXEC
7714+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7715+#endif
7716+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7717+#endif
7718+
7719 }
7720- }
7721 #endif
7722+ }
7723 }
7724
7725 #define activate_mm(prev, next) \
7726diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7727--- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7728+++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7729@@ -9,7 +9,7 @@
7730 * we put the segment information here.
7731 */
7732 typedef struct {
7733- void *ldt;
7734+ struct desc_struct *ldt;
7735 int size;
7736
7737 #ifdef CONFIG_X86_64
7738@@ -18,7 +18,19 @@ typedef struct {
7739 #endif
7740
7741 struct mutex lock;
7742- void *vdso;
7743+ unsigned long vdso;
7744+
7745+#ifdef CONFIG_X86_32
7746+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7747+ unsigned long user_cs_base;
7748+ unsigned long user_cs_limit;
7749+
7750+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7751+ cpumask_t cpu_user_cs_mask;
7752+#endif
7753+
7754+#endif
7755+#endif
7756 } mm_context_t;
7757
7758 #ifdef CONFIG_SMP
7759diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7760--- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7761+++ linux-3.0.4/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7762@@ -5,6 +5,7 @@
7763
7764 #ifdef CONFIG_X86_64
7765 /* X86_64 does not define MODULE_PROC_FAMILY */
7766+#define MODULE_PROC_FAMILY ""
7767 #elif defined CONFIG_M386
7768 #define MODULE_PROC_FAMILY "386 "
7769 #elif defined CONFIG_M486
7770@@ -59,8 +60,30 @@
7771 #error unknown processor family
7772 #endif
7773
7774-#ifdef CONFIG_X86_32
7775-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7776+#ifdef CONFIG_PAX_MEMORY_UDEREF
7777+#define MODULE_PAX_UDEREF "UDEREF "
7778+#else
7779+#define MODULE_PAX_UDEREF ""
7780+#endif
7781+
7782+#ifdef CONFIG_PAX_KERNEXEC
7783+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7784+#else
7785+#define MODULE_PAX_KERNEXEC ""
7786 #endif
7787
7788+#ifdef CONFIG_PAX_REFCOUNT
7789+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7790+#else
7791+#define MODULE_PAX_REFCOUNT ""
7792+#endif
7793+
7794+#ifdef CONFIG_GRKERNSEC
7795+#define MODULE_GRSEC "GRSECURITY "
7796+#else
7797+#define MODULE_GRSEC ""
7798+#endif
7799+
7800+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7801+
7802 #endif /* _ASM_X86_MODULE_H */
7803diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7804--- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7805+++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7806@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7807
7808 /* duplicated to the one in bootmem.h */
7809 extern unsigned long max_pfn;
7810-extern unsigned long phys_base;
7811+extern const unsigned long phys_base;
7812
7813 extern unsigned long __phys_addr(unsigned long);
7814 #define __phys_reloc_hide(x) (x)
7815diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7816--- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7817+++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7818@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7819 val);
7820 }
7821
7822+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7823+{
7824+ pgdval_t val = native_pgd_val(pgd);
7825+
7826+ if (sizeof(pgdval_t) > sizeof(long))
7827+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7828+ val, (u64)val >> 32);
7829+ else
7830+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7831+ val);
7832+}
7833+
7834 static inline void pgd_clear(pgd_t *pgdp)
7835 {
7836 set_pgd(pgdp, __pgd(0));
7837@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7838 pv_mmu_ops.set_fixmap(idx, phys, flags);
7839 }
7840
7841+#ifdef CONFIG_PAX_KERNEXEC
7842+static inline unsigned long pax_open_kernel(void)
7843+{
7844+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7845+}
7846+
7847+static inline unsigned long pax_close_kernel(void)
7848+{
7849+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7850+}
7851+#else
7852+static inline unsigned long pax_open_kernel(void) { return 0; }
7853+static inline unsigned long pax_close_kernel(void) { return 0; }
7854+#endif
7855+
7856 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7857
7858 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7859@@ -955,7 +982,7 @@ extern void default_banner(void);
7860
7861 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7862 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7863-#define PARA_INDIRECT(addr) *%cs:addr
7864+#define PARA_INDIRECT(addr) *%ss:addr
7865 #endif
7866
7867 #define INTERRUPT_RETURN \
7868@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7869 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7870 CLBR_NONE, \
7871 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7872+
7873+#define GET_CR0_INTO_RDI \
7874+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7875+ mov %rax,%rdi
7876+
7877+#define SET_RDI_INTO_CR0 \
7878+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7879+
7880+#define GET_CR3_INTO_RDI \
7881+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7882+ mov %rax,%rdi
7883+
7884+#define SET_RDI_INTO_CR3 \
7885+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7886+
7887 #endif /* CONFIG_X86_32 */
7888
7889 #endif /* __ASSEMBLY__ */
7890diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7891--- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7892+++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7893@@ -78,19 +78,19 @@ struct pv_init_ops {
7894 */
7895 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7896 unsigned long addr, unsigned len);
7897-};
7898+} __no_const;
7899
7900
7901 struct pv_lazy_ops {
7902 /* Set deferred update mode, used for batching operations. */
7903 void (*enter)(void);
7904 void (*leave)(void);
7905-};
7906+} __no_const;
7907
7908 struct pv_time_ops {
7909 unsigned long long (*sched_clock)(void);
7910 unsigned long (*get_tsc_khz)(void);
7911-};
7912+} __no_const;
7913
7914 struct pv_cpu_ops {
7915 /* hooks for various privileged instructions */
7916@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7917
7918 void (*start_context_switch)(struct task_struct *prev);
7919 void (*end_context_switch)(struct task_struct *next);
7920-};
7921+} __no_const;
7922
7923 struct pv_irq_ops {
7924 /*
7925@@ -217,7 +217,7 @@ struct pv_apic_ops {
7926 unsigned long start_eip,
7927 unsigned long start_esp);
7928 #endif
7929-};
7930+} __no_const;
7931
7932 struct pv_mmu_ops {
7933 unsigned long (*read_cr2)(void);
7934@@ -306,6 +306,7 @@ struct pv_mmu_ops {
7935 struct paravirt_callee_save make_pud;
7936
7937 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7938+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7939 #endif /* PAGETABLE_LEVELS == 4 */
7940 #endif /* PAGETABLE_LEVELS >= 3 */
7941
7942@@ -317,6 +318,12 @@ struct pv_mmu_ops {
7943 an mfn. We can tell which is which from the index. */
7944 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7945 phys_addr_t phys, pgprot_t flags);
7946+
7947+#ifdef CONFIG_PAX_KERNEXEC
7948+ unsigned long (*pax_open_kernel)(void);
7949+ unsigned long (*pax_close_kernel)(void);
7950+#endif
7951+
7952 };
7953
7954 struct arch_spinlock;
7955@@ -327,7 +334,7 @@ struct pv_lock_ops {
7956 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7957 int (*spin_trylock)(struct arch_spinlock *lock);
7958 void (*spin_unlock)(struct arch_spinlock *lock);
7959-};
7960+} __no_const;
7961
7962 /* This contains all the paravirt structures: we get a convenient
7963 * number for each function using the offset which we use to indicate
7964diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7965--- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7966+++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7967@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7968 pmd_t *pmd, pte_t *pte)
7969 {
7970 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7971+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7972+}
7973+
7974+static inline void pmd_populate_user(struct mm_struct *mm,
7975+ pmd_t *pmd, pte_t *pte)
7976+{
7977+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7978 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7979 }
7980
7981diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7982--- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7983+++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7984@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7985
7986 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7987 {
7988+ pax_open_kernel();
7989 *pmdp = pmd;
7990+ pax_close_kernel();
7991 }
7992
7993 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7994diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7995--- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7996+++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7997@@ -25,9 +25,6 @@
7998 struct mm_struct;
7999 struct vm_area_struct;
8000
8001-extern pgd_t swapper_pg_dir[1024];
8002-extern pgd_t initial_page_table[1024];
8003-
8004 static inline void pgtable_cache_init(void) { }
8005 static inline void check_pgt_cache(void) { }
8006 void paging_init(void);
8007@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8008 # include <asm/pgtable-2level.h>
8009 #endif
8010
8011+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8012+extern pgd_t initial_page_table[PTRS_PER_PGD];
8013+#ifdef CONFIG_X86_PAE
8014+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8015+#endif
8016+
8017 #if defined(CONFIG_HIGHPTE)
8018 #define pte_offset_map(dir, address) \
8019 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8020@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8021 /* Clear a kernel PTE and flush it from the TLB */
8022 #define kpte_clear_flush(ptep, vaddr) \
8023 do { \
8024+ pax_open_kernel(); \
8025 pte_clear(&init_mm, (vaddr), (ptep)); \
8026+ pax_close_kernel(); \
8027 __flush_tlb_one((vaddr)); \
8028 } while (0)
8029
8030@@ -74,6 +79,9 @@ do { \
8031
8032 #endif /* !__ASSEMBLY__ */
8033
8034+#define HAVE_ARCH_UNMAPPED_AREA
8035+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8036+
8037 /*
8038 * kern_addr_valid() is (1) for FLATMEM and (0) for
8039 * SPARSEMEM and DISCONTIGMEM
8040diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
8041--- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8042+++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8043@@ -8,7 +8,7 @@
8044 */
8045 #ifdef CONFIG_X86_PAE
8046 # include <asm/pgtable-3level_types.h>
8047-# define PMD_SIZE (1UL << PMD_SHIFT)
8048+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8049 # define PMD_MASK (~(PMD_SIZE - 1))
8050 #else
8051 # include <asm/pgtable-2level_types.h>
8052@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8053 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8054 #endif
8055
8056+#ifdef CONFIG_PAX_KERNEXEC
8057+#ifndef __ASSEMBLY__
8058+extern unsigned char MODULES_EXEC_VADDR[];
8059+extern unsigned char MODULES_EXEC_END[];
8060+#endif
8061+#include <asm/boot.h>
8062+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8063+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8064+#else
8065+#define ktla_ktva(addr) (addr)
8066+#define ktva_ktla(addr) (addr)
8067+#endif
8068+
8069 #define MODULES_VADDR VMALLOC_START
8070 #define MODULES_END VMALLOC_END
8071 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8072diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8073--- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8074+++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8075@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8076
8077 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8078 {
8079+ pax_open_kernel();
8080 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8081+ pax_close_kernel();
8082 }
8083
8084 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8085 {
8086+ pax_open_kernel();
8087 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8088+ pax_close_kernel();
8089 }
8090
8091 /*
8092diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8093--- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8094+++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8095@@ -16,10 +16,13 @@
8096
8097 extern pud_t level3_kernel_pgt[512];
8098 extern pud_t level3_ident_pgt[512];
8099+extern pud_t level3_vmalloc_pgt[512];
8100+extern pud_t level3_vmemmap_pgt[512];
8101+extern pud_t level2_vmemmap_pgt[512];
8102 extern pmd_t level2_kernel_pgt[512];
8103 extern pmd_t level2_fixmap_pgt[512];
8104-extern pmd_t level2_ident_pgt[512];
8105-extern pgd_t init_level4_pgt[];
8106+extern pmd_t level2_ident_pgt[512*2];
8107+extern pgd_t init_level4_pgt[512];
8108
8109 #define swapper_pg_dir init_level4_pgt
8110
8111@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8112
8113 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8114 {
8115+ pax_open_kernel();
8116 *pmdp = pmd;
8117+ pax_close_kernel();
8118 }
8119
8120 static inline void native_pmd_clear(pmd_t *pmd)
8121@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8122
8123 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8124 {
8125+ pax_open_kernel();
8126+ *pgdp = pgd;
8127+ pax_close_kernel();
8128+}
8129+
8130+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8131+{
8132 *pgdp = pgd;
8133 }
8134
8135diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8136--- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8137+++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8138@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8139 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8140 #define MODULES_END _AC(0xffffffffff000000, UL)
8141 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8142+#define MODULES_EXEC_VADDR MODULES_VADDR
8143+#define MODULES_EXEC_END MODULES_END
8144+
8145+#define ktla_ktva(addr) (addr)
8146+#define ktva_ktla(addr) (addr)
8147
8148 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8149diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8150--- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8151+++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8152@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8153
8154 #ifndef __PAGETABLE_PUD_FOLDED
8155 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8156+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8157 #define pgd_clear(pgd) native_pgd_clear(pgd)
8158 #endif
8159
8160@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8161
8162 #define arch_end_context_switch(prev) do {} while(0)
8163
8164+#define pax_open_kernel() native_pax_open_kernel()
8165+#define pax_close_kernel() native_pax_close_kernel()
8166 #endif /* CONFIG_PARAVIRT */
8167
8168+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8169+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8170+
8171+#ifdef CONFIG_PAX_KERNEXEC
8172+static inline unsigned long native_pax_open_kernel(void)
8173+{
8174+ unsigned long cr0;
8175+
8176+ preempt_disable();
8177+ barrier();
8178+ cr0 = read_cr0() ^ X86_CR0_WP;
8179+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8180+ write_cr0(cr0);
8181+ return cr0 ^ X86_CR0_WP;
8182+}
8183+
8184+static inline unsigned long native_pax_close_kernel(void)
8185+{
8186+ unsigned long cr0;
8187+
8188+ cr0 = read_cr0() ^ X86_CR0_WP;
8189+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8190+ write_cr0(cr0);
8191+ barrier();
8192+ preempt_enable_no_resched();
8193+ return cr0 ^ X86_CR0_WP;
8194+}
8195+#else
8196+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8197+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8198+#endif
8199+
8200 /*
8201 * The following only work if pte_present() is true.
8202 * Undefined behaviour if not..
8203 */
8204+static inline int pte_user(pte_t pte)
8205+{
8206+ return pte_val(pte) & _PAGE_USER;
8207+}
8208+
8209 static inline int pte_dirty(pte_t pte)
8210 {
8211 return pte_flags(pte) & _PAGE_DIRTY;
8212@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8213 return pte_clear_flags(pte, _PAGE_RW);
8214 }
8215
8216+static inline pte_t pte_mkread(pte_t pte)
8217+{
8218+ return __pte(pte_val(pte) | _PAGE_USER);
8219+}
8220+
8221 static inline pte_t pte_mkexec(pte_t pte)
8222 {
8223- return pte_clear_flags(pte, _PAGE_NX);
8224+#ifdef CONFIG_X86_PAE
8225+ if (__supported_pte_mask & _PAGE_NX)
8226+ return pte_clear_flags(pte, _PAGE_NX);
8227+ else
8228+#endif
8229+ return pte_set_flags(pte, _PAGE_USER);
8230+}
8231+
8232+static inline pte_t pte_exprotect(pte_t pte)
8233+{
8234+#ifdef CONFIG_X86_PAE
8235+ if (__supported_pte_mask & _PAGE_NX)
8236+ return pte_set_flags(pte, _PAGE_NX);
8237+ else
8238+#endif
8239+ return pte_clear_flags(pte, _PAGE_USER);
8240 }
8241
8242 static inline pte_t pte_mkdirty(pte_t pte)
8243@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8244 #endif
8245
8246 #ifndef __ASSEMBLY__
8247+
8248+#ifdef CONFIG_PAX_PER_CPU_PGD
8249+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8250+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8251+{
8252+ return cpu_pgd[cpu];
8253+}
8254+#endif
8255+
8256 #include <linux/mm_types.h>
8257
8258 static inline int pte_none(pte_t pte)
8259@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8260
8261 static inline int pgd_bad(pgd_t pgd)
8262 {
8263- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8264+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8265 }
8266
8267 static inline int pgd_none(pgd_t pgd)
8268@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8269 * pgd_offset() returns a (pgd_t *)
8270 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8271 */
8272-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8273+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8274+
8275+#ifdef CONFIG_PAX_PER_CPU_PGD
8276+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8277+#endif
8278+
8279 /*
8280 * a shortcut which implies the use of the kernel's pgd, instead
8281 * of a process's
8282@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8283 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8284 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8285
8286+#ifdef CONFIG_X86_32
8287+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8288+#else
8289+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8290+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8291+
8292+#ifdef CONFIG_PAX_MEMORY_UDEREF
8293+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8294+#else
8295+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8296+#endif
8297+
8298+#endif
8299+
8300 #ifndef __ASSEMBLY__
8301
8302 extern int direct_gbpages;
8303@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8304 * dst and src can be on the same page, but the range must not overlap,
8305 * and must not cross a page boundary.
8306 */
8307-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8308+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8309 {
8310- memcpy(dst, src, count * sizeof(pgd_t));
8311+ pax_open_kernel();
8312+ while (count--)
8313+ *dst++ = *src++;
8314+ pax_close_kernel();
8315 }
8316
8317+#ifdef CONFIG_PAX_PER_CPU_PGD
8318+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8319+#endif
8320+
8321+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8322+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8323+#else
8324+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8325+#endif
8326
8327 #include <asm-generic/pgtable.h>
8328 #endif /* __ASSEMBLY__ */
8329diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8330--- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8331+++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8332@@ -16,13 +16,12 @@
8333 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8334 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8335 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8336-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8337+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8338 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8339 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8340 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8341-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8342-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8343-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8344+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8345+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8346 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8347
8348 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8349@@ -40,7 +39,6 @@
8350 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8351 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8352 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8353-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8354 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8355 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8356 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8357@@ -57,8 +55,10 @@
8358
8359 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8360 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8361-#else
8362+#elif defined(CONFIG_KMEMCHECK)
8363 #define _PAGE_NX (_AT(pteval_t, 0))
8364+#else
8365+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8366 #endif
8367
8368 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8369@@ -96,6 +96,9 @@
8370 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8371 _PAGE_ACCESSED)
8372
8373+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8374+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8375+
8376 #define __PAGE_KERNEL_EXEC \
8377 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8378 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8379@@ -106,8 +109,8 @@
8380 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8381 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8382 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8383-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8384-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8385+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8386+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8387 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8388 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8389 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8390@@ -166,8 +169,8 @@
8391 * bits are combined, this will alow user to access the high address mapped
8392 * VDSO in the presence of CONFIG_COMPAT_VDSO
8393 */
8394-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8395-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8396+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8397+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8398 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8399 #endif
8400
8401@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8402 {
8403 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8404 }
8405+#endif
8406
8407+#if PAGETABLE_LEVELS == 3
8408+#include <asm-generic/pgtable-nopud.h>
8409+#endif
8410+
8411+#if PAGETABLE_LEVELS == 2
8412+#include <asm-generic/pgtable-nopmd.h>
8413+#endif
8414+
8415+#ifndef __ASSEMBLY__
8416 #if PAGETABLE_LEVELS > 3
8417 typedef struct { pudval_t pud; } pud_t;
8418
8419@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8420 return pud.pud;
8421 }
8422 #else
8423-#include <asm-generic/pgtable-nopud.h>
8424-
8425 static inline pudval_t native_pud_val(pud_t pud)
8426 {
8427 return native_pgd_val(pud.pgd);
8428@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8429 return pmd.pmd;
8430 }
8431 #else
8432-#include <asm-generic/pgtable-nopmd.h>
8433-
8434 static inline pmdval_t native_pmd_val(pmd_t pmd)
8435 {
8436 return native_pgd_val(pmd.pud.pgd);
8437@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8438
8439 extern pteval_t __supported_pte_mask;
8440 extern void set_nx(void);
8441-extern int nx_enabled;
8442
8443 #define pgprot_writecombine pgprot_writecombine
8444 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8445diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8446--- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8447+++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8448@@ -266,7 +266,7 @@ struct tss_struct {
8449
8450 } ____cacheline_aligned;
8451
8452-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8453+extern struct tss_struct init_tss[NR_CPUS];
8454
8455 /*
8456 * Save the original ist values for checking stack pointers during debugging
8457@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8458 */
8459 #define TASK_SIZE PAGE_OFFSET
8460 #define TASK_SIZE_MAX TASK_SIZE
8461+
8462+#ifdef CONFIG_PAX_SEGMEXEC
8463+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8464+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8465+#else
8466 #define STACK_TOP TASK_SIZE
8467-#define STACK_TOP_MAX STACK_TOP
8468+#endif
8469+
8470+#define STACK_TOP_MAX TASK_SIZE
8471
8472 #define INIT_THREAD { \
8473- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8474+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8475 .vm86_info = NULL, \
8476 .sysenter_cs = __KERNEL_CS, \
8477 .io_bitmap_ptr = NULL, \
8478@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8479 */
8480 #define INIT_TSS { \
8481 .x86_tss = { \
8482- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8483+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8484 .ss0 = __KERNEL_DS, \
8485 .ss1 = __KERNEL_CS, \
8486 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8487@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8488 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8489
8490 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8491-#define KSTK_TOP(info) \
8492-({ \
8493- unsigned long *__ptr = (unsigned long *)(info); \
8494- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8495-})
8496+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8497
8498 /*
8499 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8500@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8501 #define task_pt_regs(task) \
8502 ({ \
8503 struct pt_regs *__regs__; \
8504- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8505+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8506 __regs__ - 1; \
8507 })
8508
8509@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8510 /*
8511 * User space process size. 47bits minus one guard page.
8512 */
8513-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8514+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8515
8516 /* This decides where the kernel will search for a free chunk of vm
8517 * space during mmap's.
8518 */
8519 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8520- 0xc0000000 : 0xFFFFe000)
8521+ 0xc0000000 : 0xFFFFf000)
8522
8523 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8524 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8525@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8526 #define STACK_TOP_MAX TASK_SIZE_MAX
8527
8528 #define INIT_THREAD { \
8529- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8530+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8531 }
8532
8533 #define INIT_TSS { \
8534- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8535+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8536 }
8537
8538 /*
8539@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8540 */
8541 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8542
8543+#ifdef CONFIG_PAX_SEGMEXEC
8544+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8545+#endif
8546+
8547 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8548
8549 /* Get/set a process' ability to use the timestamp counter instruction */
8550diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8551--- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8552+++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8553@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8554 }
8555
8556 /*
8557- * user_mode_vm(regs) determines whether a register set came from user mode.
8558+ * user_mode(regs) determines whether a register set came from user mode.
8559 * This is true if V8086 mode was enabled OR if the register set was from
8560 * protected mode with RPL-3 CS value. This tricky test checks that with
8561 * one comparison. Many places in the kernel can bypass this full check
8562- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8563+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8564+ * be used.
8565 */
8566-static inline int user_mode(struct pt_regs *regs)
8567+static inline int user_mode_novm(struct pt_regs *regs)
8568 {
8569 #ifdef CONFIG_X86_32
8570 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8571 #else
8572- return !!(regs->cs & 3);
8573+ return !!(regs->cs & SEGMENT_RPL_MASK);
8574 #endif
8575 }
8576
8577-static inline int user_mode_vm(struct pt_regs *regs)
8578+static inline int user_mode(struct pt_regs *regs)
8579 {
8580 #ifdef CONFIG_X86_32
8581 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8582 USER_RPL;
8583 #else
8584- return user_mode(regs);
8585+ return user_mode_novm(regs);
8586 #endif
8587 }
8588
8589diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8590--- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8591+++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8592@@ -6,19 +6,19 @@
8593 struct pt_regs;
8594
8595 struct machine_ops {
8596- void (*restart)(char *cmd);
8597- void (*halt)(void);
8598- void (*power_off)(void);
8599+ void (* __noreturn restart)(char *cmd);
8600+ void (* __noreturn halt)(void);
8601+ void (* __noreturn power_off)(void);
8602 void (*shutdown)(void);
8603 void (*crash_shutdown)(struct pt_regs *);
8604- void (*emergency_restart)(void);
8605-};
8606+ void (* __noreturn emergency_restart)(void);
8607+} __no_const;
8608
8609 extern struct machine_ops machine_ops;
8610
8611 void native_machine_crash_shutdown(struct pt_regs *regs);
8612 void native_machine_shutdown(void);
8613-void machine_real_restart(unsigned int type);
8614+void machine_real_restart(unsigned int type) __noreturn;
8615 /* These must match dispatch_table in reboot_32.S */
8616 #define MRR_BIOS 0
8617 #define MRR_APM 1
8618diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8619--- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8620+++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8621@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8622 {
8623 asm volatile("# beginning down_read\n\t"
8624 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8625+
8626+#ifdef CONFIG_PAX_REFCOUNT
8627+ "jno 0f\n"
8628+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8629+ "int $4\n0:\n"
8630+ _ASM_EXTABLE(0b, 0b)
8631+#endif
8632+
8633 /* adds 0x00000001 */
8634 " jns 1f\n"
8635 " call call_rwsem_down_read_failed\n"
8636@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8637 "1:\n\t"
8638 " mov %1,%2\n\t"
8639 " add %3,%2\n\t"
8640+
8641+#ifdef CONFIG_PAX_REFCOUNT
8642+ "jno 0f\n"
8643+ "sub %3,%2\n"
8644+ "int $4\n0:\n"
8645+ _ASM_EXTABLE(0b, 0b)
8646+#endif
8647+
8648 " jle 2f\n\t"
8649 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8650 " jnz 1b\n\t"
8651@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8652 long tmp;
8653 asm volatile("# beginning down_write\n\t"
8654 LOCK_PREFIX " xadd %1,(%2)\n\t"
8655+
8656+#ifdef CONFIG_PAX_REFCOUNT
8657+ "jno 0f\n"
8658+ "mov %1,(%2)\n"
8659+ "int $4\n0:\n"
8660+ _ASM_EXTABLE(0b, 0b)
8661+#endif
8662+
8663 /* adds 0xffff0001, returns the old value */
8664 " test %1,%1\n\t"
8665 /* was the count 0 before? */
8666@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8667 long tmp;
8668 asm volatile("# beginning __up_read\n\t"
8669 LOCK_PREFIX " xadd %1,(%2)\n\t"
8670+
8671+#ifdef CONFIG_PAX_REFCOUNT
8672+ "jno 0f\n"
8673+ "mov %1,(%2)\n"
8674+ "int $4\n0:\n"
8675+ _ASM_EXTABLE(0b, 0b)
8676+#endif
8677+
8678 /* subtracts 1, returns the old value */
8679 " jns 1f\n\t"
8680 " call call_rwsem_wake\n" /* expects old value in %edx */
8681@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8682 long tmp;
8683 asm volatile("# beginning __up_write\n\t"
8684 LOCK_PREFIX " xadd %1,(%2)\n\t"
8685+
8686+#ifdef CONFIG_PAX_REFCOUNT
8687+ "jno 0f\n"
8688+ "mov %1,(%2)\n"
8689+ "int $4\n0:\n"
8690+ _ASM_EXTABLE(0b, 0b)
8691+#endif
8692+
8693 /* subtracts 0xffff0001, returns the old value */
8694 " jns 1f\n\t"
8695 " call call_rwsem_wake\n" /* expects old value in %edx */
8696@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8697 {
8698 asm volatile("# beginning __downgrade_write\n\t"
8699 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8700+
8701+#ifdef CONFIG_PAX_REFCOUNT
8702+ "jno 0f\n"
8703+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8704+ "int $4\n0:\n"
8705+ _ASM_EXTABLE(0b, 0b)
8706+#endif
8707+
8708 /*
8709 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8710 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8711@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8712 */
8713 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8714 {
8715- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8716+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8717+
8718+#ifdef CONFIG_PAX_REFCOUNT
8719+ "jno 0f\n"
8720+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8721+ "int $4\n0:\n"
8722+ _ASM_EXTABLE(0b, 0b)
8723+#endif
8724+
8725 : "+m" (sem->count)
8726 : "er" (delta));
8727 }
8728@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8729 {
8730 long tmp = delta;
8731
8732- asm volatile(LOCK_PREFIX "xadd %0,%1"
8733+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8734+
8735+#ifdef CONFIG_PAX_REFCOUNT
8736+ "jno 0f\n"
8737+ "mov %0,%1\n"
8738+ "int $4\n0:\n"
8739+ _ASM_EXTABLE(0b, 0b)
8740+#endif
8741+
8742 : "+r" (tmp), "+m" (sem->count)
8743 : : "memory");
8744
8745diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8746--- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8747+++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8748@@ -64,10 +64,15 @@
8749 * 26 - ESPFIX small SS
8750 * 27 - per-cpu [ offset to per-cpu data area ]
8751 * 28 - stack_canary-20 [ for stack protector ]
8752- * 29 - unused
8753- * 30 - unused
8754+ * 29 - PCI BIOS CS
8755+ * 30 - PCI BIOS DS
8756 * 31 - TSS for double fault handler
8757 */
8758+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8759+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8760+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8761+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8762+
8763 #define GDT_ENTRY_TLS_MIN 6
8764 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8765
8766@@ -79,6 +84,8 @@
8767
8768 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8769
8770+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8771+
8772 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8773
8774 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8775@@ -104,6 +111,12 @@
8776 #define __KERNEL_STACK_CANARY 0
8777 #endif
8778
8779+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8780+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8781+
8782+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8783+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8784+
8785 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8786
8787 /*
8788@@ -141,7 +154,7 @@
8789 */
8790
8791 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8792-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8793+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8794
8795
8796 #else
8797@@ -165,6 +178,8 @@
8798 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8799 #define __USER32_DS __USER_DS
8800
8801+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8802+
8803 #define GDT_ENTRY_TSS 8 /* needs two entries */
8804 #define GDT_ENTRY_LDT 10 /* needs two entries */
8805 #define GDT_ENTRY_TLS_MIN 12
8806@@ -185,6 +200,7 @@
8807 #endif
8808
8809 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8810+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8811 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8812 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8813 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8814diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8815--- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8816+++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8817@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8818 /* cpus sharing the last level cache: */
8819 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8820 DECLARE_PER_CPU(u16, cpu_llc_id);
8821-DECLARE_PER_CPU(int, cpu_number);
8822+DECLARE_PER_CPU(unsigned int, cpu_number);
8823
8824 static inline struct cpumask *cpu_sibling_mask(int cpu)
8825 {
8826@@ -77,7 +77,7 @@ struct smp_ops {
8827
8828 void (*send_call_func_ipi)(const struct cpumask *mask);
8829 void (*send_call_func_single_ipi)(int cpu);
8830-};
8831+} __no_const;
8832
8833 /* Globals due to paravirt */
8834 extern void set_cpu_sibling_map(int cpu);
8835@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8836 extern int safe_smp_processor_id(void);
8837
8838 #elif defined(CONFIG_X86_64_SMP)
8839-#define raw_smp_processor_id() (percpu_read(cpu_number))
8840-
8841-#define stack_smp_processor_id() \
8842-({ \
8843- struct thread_info *ti; \
8844- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8845- ti->cpu; \
8846-})
8847+#define raw_smp_processor_id() (percpu_read(cpu_number))
8848+#define stack_smp_processor_id() raw_smp_processor_id()
8849 #define safe_smp_processor_id() smp_processor_id()
8850
8851 #endif
8852diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8853--- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8854+++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8855@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8856 static inline void arch_read_lock(arch_rwlock_t *rw)
8857 {
8858 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8859+
8860+#ifdef CONFIG_PAX_REFCOUNT
8861+ "jno 0f\n"
8862+ LOCK_PREFIX " addl $1,(%0)\n"
8863+ "int $4\n0:\n"
8864+ _ASM_EXTABLE(0b, 0b)
8865+#endif
8866+
8867 "jns 1f\n"
8868 "call __read_lock_failed\n\t"
8869 "1:\n"
8870@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8871 static inline void arch_write_lock(arch_rwlock_t *rw)
8872 {
8873 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8874+
8875+#ifdef CONFIG_PAX_REFCOUNT
8876+ "jno 0f\n"
8877+ LOCK_PREFIX " addl %1,(%0)\n"
8878+ "int $4\n0:\n"
8879+ _ASM_EXTABLE(0b, 0b)
8880+#endif
8881+
8882 "jz 1f\n"
8883 "call __write_lock_failed\n\t"
8884 "1:\n"
8885@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8886
8887 static inline void arch_read_unlock(arch_rwlock_t *rw)
8888 {
8889- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8890+ asm volatile(LOCK_PREFIX "incl %0\n"
8891+
8892+#ifdef CONFIG_PAX_REFCOUNT
8893+ "jno 0f\n"
8894+ LOCK_PREFIX "decl %0\n"
8895+ "int $4\n0:\n"
8896+ _ASM_EXTABLE(0b, 0b)
8897+#endif
8898+
8899+ :"+m" (rw->lock) : : "memory");
8900 }
8901
8902 static inline void arch_write_unlock(arch_rwlock_t *rw)
8903 {
8904- asm volatile(LOCK_PREFIX "addl %1, %0"
8905+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8906+
8907+#ifdef CONFIG_PAX_REFCOUNT
8908+ "jno 0f\n"
8909+ LOCK_PREFIX "subl %1, %0\n"
8910+ "int $4\n0:\n"
8911+ _ASM_EXTABLE(0b, 0b)
8912+#endif
8913+
8914 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8915 }
8916
8917diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8918--- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8919+++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8920@@ -48,7 +48,7 @@
8921 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8922 */
8923 #define GDT_STACK_CANARY_INIT \
8924- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8925+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8926
8927 /*
8928 * Initialize the stackprotector canary value.
8929@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8930
8931 static inline void load_stack_canary_segment(void)
8932 {
8933-#ifdef CONFIG_X86_32
8934+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8935 asm volatile ("mov %0, %%gs" : : "r" (0));
8936 #endif
8937 }
8938diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8939--- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8940+++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8941@@ -11,28 +11,20 @@
8942
8943 extern int kstack_depth_to_print;
8944
8945-struct thread_info;
8946+struct task_struct;
8947 struct stacktrace_ops;
8948
8949-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8950- unsigned long *stack,
8951- unsigned long bp,
8952- const struct stacktrace_ops *ops,
8953- void *data,
8954- unsigned long *end,
8955- int *graph);
8956-
8957-extern unsigned long
8958-print_context_stack(struct thread_info *tinfo,
8959- unsigned long *stack, unsigned long bp,
8960- const struct stacktrace_ops *ops, void *data,
8961- unsigned long *end, int *graph);
8962-
8963-extern unsigned long
8964-print_context_stack_bp(struct thread_info *tinfo,
8965- unsigned long *stack, unsigned long bp,
8966- const struct stacktrace_ops *ops, void *data,
8967- unsigned long *end, int *graph);
8968+typedef unsigned long walk_stack_t(struct task_struct *task,
8969+ void *stack_start,
8970+ unsigned long *stack,
8971+ unsigned long bp,
8972+ const struct stacktrace_ops *ops,
8973+ void *data,
8974+ unsigned long *end,
8975+ int *graph);
8976+
8977+extern walk_stack_t print_context_stack;
8978+extern walk_stack_t print_context_stack_bp;
8979
8980 /* Generic stack tracer with callbacks */
8981
8982@@ -40,7 +32,7 @@ struct stacktrace_ops {
8983 void (*address)(void *data, unsigned long address, int reliable);
8984 /* On negative return stop dumping */
8985 int (*stack)(void *data, char *name);
8986- walk_stack_t walk_stack;
8987+ walk_stack_t *walk_stack;
8988 };
8989
8990 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8991diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8992--- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8993+++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8994@@ -129,7 +129,7 @@ do { \
8995 "call __switch_to\n\t" \
8996 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8997 __switch_canary \
8998- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8999+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9000 "movq %%rax,%%rdi\n\t" \
9001 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9002 "jnz ret_from_fork\n\t" \
9003@@ -140,7 +140,7 @@ do { \
9004 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9005 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9006 [_tif_fork] "i" (_TIF_FORK), \
9007- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9008+ [thread_info] "m" (current_tinfo), \
9009 [current_task] "m" (current_task) \
9010 __switch_canary_iparam \
9011 : "memory", "cc" __EXTRA_CLOBBER)
9012@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9013 {
9014 unsigned long __limit;
9015 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9016- return __limit + 1;
9017+ return __limit;
9018 }
9019
9020 static inline void native_clts(void)
9021@@ -397,12 +397,12 @@ void enable_hlt(void);
9022
9023 void cpu_idle_wait(void);
9024
9025-extern unsigned long arch_align_stack(unsigned long sp);
9026+#define arch_align_stack(x) ((x) & ~0xfUL)
9027 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9028
9029 void default_idle(void);
9030
9031-void stop_this_cpu(void *dummy);
9032+void stop_this_cpu(void *dummy) __noreturn;
9033
9034 /*
9035 * Force strict CPU ordering.
9036diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
9037--- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9038+++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9039@@ -10,6 +10,7 @@
9040 #include <linux/compiler.h>
9041 #include <asm/page.h>
9042 #include <asm/types.h>
9043+#include <asm/percpu.h>
9044
9045 /*
9046 * low level task data that entry.S needs immediate access to
9047@@ -24,7 +25,6 @@ struct exec_domain;
9048 #include <asm/atomic.h>
9049
9050 struct thread_info {
9051- struct task_struct *task; /* main task structure */
9052 struct exec_domain *exec_domain; /* execution domain */
9053 __u32 flags; /* low level flags */
9054 __u32 status; /* thread synchronous flags */
9055@@ -34,18 +34,12 @@ struct thread_info {
9056 mm_segment_t addr_limit;
9057 struct restart_block restart_block;
9058 void __user *sysenter_return;
9059-#ifdef CONFIG_X86_32
9060- unsigned long previous_esp; /* ESP of the previous stack in
9061- case of nested (IRQ) stacks
9062- */
9063- __u8 supervisor_stack[0];
9064-#endif
9065+ unsigned long lowest_stack;
9066 int uaccess_err;
9067 };
9068
9069-#define INIT_THREAD_INFO(tsk) \
9070+#define INIT_THREAD_INFO \
9071 { \
9072- .task = &tsk, \
9073 .exec_domain = &default_exec_domain, \
9074 .flags = 0, \
9075 .cpu = 0, \
9076@@ -56,7 +50,7 @@ struct thread_info {
9077 }, \
9078 }
9079
9080-#define init_thread_info (init_thread_union.thread_info)
9081+#define init_thread_info (init_thread_union.stack)
9082 #define init_stack (init_thread_union.stack)
9083
9084 #else /* !__ASSEMBLY__ */
9085@@ -170,6 +164,23 @@ struct thread_info {
9086 ret; \
9087 })
9088
9089+#ifdef __ASSEMBLY__
9090+/* how to get the thread information struct from ASM */
9091+#define GET_THREAD_INFO(reg) \
9092+ mov PER_CPU_VAR(current_tinfo), reg
9093+
9094+/* use this one if reg already contains %esp */
9095+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9096+#else
9097+/* how to get the thread information struct from C */
9098+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9099+
9100+static __always_inline struct thread_info *current_thread_info(void)
9101+{
9102+ return percpu_read_stable(current_tinfo);
9103+}
9104+#endif
9105+
9106 #ifdef CONFIG_X86_32
9107
9108 #define STACK_WARN (THREAD_SIZE/8)
9109@@ -180,35 +191,13 @@ struct thread_info {
9110 */
9111 #ifndef __ASSEMBLY__
9112
9113-
9114 /* how to get the current stack pointer from C */
9115 register unsigned long current_stack_pointer asm("esp") __used;
9116
9117-/* how to get the thread information struct from C */
9118-static inline struct thread_info *current_thread_info(void)
9119-{
9120- return (struct thread_info *)
9121- (current_stack_pointer & ~(THREAD_SIZE - 1));
9122-}
9123-
9124-#else /* !__ASSEMBLY__ */
9125-
9126-/* how to get the thread information struct from ASM */
9127-#define GET_THREAD_INFO(reg) \
9128- movl $-THREAD_SIZE, reg; \
9129- andl %esp, reg
9130-
9131-/* use this one if reg already contains %esp */
9132-#define GET_THREAD_INFO_WITH_ESP(reg) \
9133- andl $-THREAD_SIZE, reg
9134-
9135 #endif
9136
9137 #else /* X86_32 */
9138
9139-#include <asm/percpu.h>
9140-#define KERNEL_STACK_OFFSET (5*8)
9141-
9142 /*
9143 * macros/functions for gaining access to the thread information structure
9144 * preempt_count needs to be 1 initially, until the scheduler is functional.
9145@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9146 #ifndef __ASSEMBLY__
9147 DECLARE_PER_CPU(unsigned long, kernel_stack);
9148
9149-static inline struct thread_info *current_thread_info(void)
9150-{
9151- struct thread_info *ti;
9152- ti = (void *)(percpu_read_stable(kernel_stack) +
9153- KERNEL_STACK_OFFSET - THREAD_SIZE);
9154- return ti;
9155-}
9156-
9157-#else /* !__ASSEMBLY__ */
9158-
9159-/* how to get the thread information struct from ASM */
9160-#define GET_THREAD_INFO(reg) \
9161- movq PER_CPU_VAR(kernel_stack),reg ; \
9162- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9163-
9164+/* how to get the current stack pointer from C */
9165+register unsigned long current_stack_pointer asm("rsp") __used;
9166 #endif
9167
9168 #endif /* !X86_32 */
9169@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9170 extern void free_thread_info(struct thread_info *ti);
9171 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9172 #define arch_task_cache_init arch_task_cache_init
9173+
9174+#define __HAVE_THREAD_FUNCTIONS
9175+#define task_thread_info(task) (&(task)->tinfo)
9176+#define task_stack_page(task) ((task)->stack)
9177+#define setup_thread_stack(p, org) do {} while (0)
9178+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9179+
9180+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9181+extern struct task_struct *alloc_task_struct_node(int node);
9182+extern void free_task_struct(struct task_struct *);
9183+
9184 #endif
9185 #endif /* _ASM_X86_THREAD_INFO_H */
9186diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9187--- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9188+++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9189@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9190 static __always_inline unsigned long __must_check
9191 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9192 {
9193+ pax_track_stack();
9194+
9195+ if ((long)n < 0)
9196+ return n;
9197+
9198 if (__builtin_constant_p(n)) {
9199 unsigned long ret;
9200
9201@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9202 return ret;
9203 }
9204 }
9205+ if (!__builtin_constant_p(n))
9206+ check_object_size(from, n, true);
9207 return __copy_to_user_ll(to, from, n);
9208 }
9209
9210@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9211 __copy_to_user(void __user *to, const void *from, unsigned long n)
9212 {
9213 might_fault();
9214+
9215 return __copy_to_user_inatomic(to, from, n);
9216 }
9217
9218 static __always_inline unsigned long
9219 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9220 {
9221+ if ((long)n < 0)
9222+ return n;
9223+
9224 /* Avoid zeroing the tail if the copy fails..
9225 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9226 * but as the zeroing behaviour is only significant when n is not
9227@@ -137,6 +148,12 @@ static __always_inline unsigned long
9228 __copy_from_user(void *to, const void __user *from, unsigned long n)
9229 {
9230 might_fault();
9231+
9232+ pax_track_stack();
9233+
9234+ if ((long)n < 0)
9235+ return n;
9236+
9237 if (__builtin_constant_p(n)) {
9238 unsigned long ret;
9239
9240@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9241 return ret;
9242 }
9243 }
9244+ if (!__builtin_constant_p(n))
9245+ check_object_size(to, n, false);
9246 return __copy_from_user_ll(to, from, n);
9247 }
9248
9249@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9250 const void __user *from, unsigned long n)
9251 {
9252 might_fault();
9253+
9254+ if ((long)n < 0)
9255+ return n;
9256+
9257 if (__builtin_constant_p(n)) {
9258 unsigned long ret;
9259
9260@@ -181,15 +204,19 @@ static __always_inline unsigned long
9261 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9262 unsigned long n)
9263 {
9264- return __copy_from_user_ll_nocache_nozero(to, from, n);
9265-}
9266+ if ((long)n < 0)
9267+ return n;
9268
9269-unsigned long __must_check copy_to_user(void __user *to,
9270- const void *from, unsigned long n);
9271-unsigned long __must_check _copy_from_user(void *to,
9272- const void __user *from,
9273- unsigned long n);
9274+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9275+}
9276
9277+extern void copy_to_user_overflow(void)
9278+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9279+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9280+#else
9281+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9282+#endif
9283+;
9284
9285 extern void copy_from_user_overflow(void)
9286 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9287@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9288 #endif
9289 ;
9290
9291-static inline unsigned long __must_check copy_from_user(void *to,
9292- const void __user *from,
9293- unsigned long n)
9294+/**
9295+ * copy_to_user: - Copy a block of data into user space.
9296+ * @to: Destination address, in user space.
9297+ * @from: Source address, in kernel space.
9298+ * @n: Number of bytes to copy.
9299+ *
9300+ * Context: User context only. This function may sleep.
9301+ *
9302+ * Copy data from kernel space to user space.
9303+ *
9304+ * Returns number of bytes that could not be copied.
9305+ * On success, this will be zero.
9306+ */
9307+static inline unsigned long __must_check
9308+copy_to_user(void __user *to, const void *from, unsigned long n)
9309+{
9310+ int sz = __compiletime_object_size(from);
9311+
9312+ if (unlikely(sz != -1 && sz < n))
9313+ copy_to_user_overflow();
9314+ else if (access_ok(VERIFY_WRITE, to, n))
9315+ n = __copy_to_user(to, from, n);
9316+ return n;
9317+}
9318+
9319+/**
9320+ * copy_from_user: - Copy a block of data from user space.
9321+ * @to: Destination address, in kernel space.
9322+ * @from: Source address, in user space.
9323+ * @n: Number of bytes to copy.
9324+ *
9325+ * Context: User context only. This function may sleep.
9326+ *
9327+ * Copy data from user space to kernel space.
9328+ *
9329+ * Returns number of bytes that could not be copied.
9330+ * On success, this will be zero.
9331+ *
9332+ * If some data could not be copied, this function will pad the copied
9333+ * data to the requested size using zero bytes.
9334+ */
9335+static inline unsigned long __must_check
9336+copy_from_user(void *to, const void __user *from, unsigned long n)
9337 {
9338 int sz = __compiletime_object_size(to);
9339
9340- if (likely(sz == -1 || sz >= n))
9341- n = _copy_from_user(to, from, n);
9342- else
9343+ if (unlikely(sz != -1 && sz < n))
9344 copy_from_user_overflow();
9345-
9346+ else if (access_ok(VERIFY_READ, from, n))
9347+ n = __copy_from_user(to, from, n);
9348+ else if ((long)n > 0) {
9349+ if (!__builtin_constant_p(n))
9350+ check_object_size(to, n, false);
9351+ memset(to, 0, n);
9352+ }
9353 return n;
9354 }
9355
9356diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9357--- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9358+++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9359@@ -10,6 +10,9 @@
9360 #include <asm/alternative.h>
9361 #include <asm/cpufeature.h>
9362 #include <asm/page.h>
9363+#include <asm/pgtable.h>
9364+
9365+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9366
9367 /*
9368 * Copy To/From Userspace
9369@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9370 return ret;
9371 }
9372
9373-__must_check unsigned long
9374-_copy_to_user(void __user *to, const void *from, unsigned len);
9375-__must_check unsigned long
9376-_copy_from_user(void *to, const void __user *from, unsigned len);
9377+static __always_inline __must_check unsigned long
9378+__copy_to_user(void __user *to, const void *from, unsigned len);
9379+static __always_inline __must_check unsigned long
9380+__copy_from_user(void *to, const void __user *from, unsigned len);
9381 __must_check unsigned long
9382 copy_in_user(void __user *to, const void __user *from, unsigned len);
9383
9384 static inline unsigned long __must_check copy_from_user(void *to,
9385 const void __user *from,
9386- unsigned long n)
9387+ unsigned n)
9388 {
9389- int sz = __compiletime_object_size(to);
9390-
9391 might_fault();
9392- if (likely(sz == -1 || sz >= n))
9393- n = _copy_from_user(to, from, n);
9394-#ifdef CONFIG_DEBUG_VM
9395- else
9396- WARN(1, "Buffer overflow detected!\n");
9397-#endif
9398+
9399+ if (access_ok(VERIFY_READ, from, n))
9400+ n = __copy_from_user(to, from, n);
9401+ else if ((int)n > 0) {
9402+ if (!__builtin_constant_p(n))
9403+ check_object_size(to, n, false);
9404+ memset(to, 0, n);
9405+ }
9406 return n;
9407 }
9408
9409@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9410 {
9411 might_fault();
9412
9413- return _copy_to_user(dst, src, size);
9414+ if (access_ok(VERIFY_WRITE, dst, size))
9415+ size = __copy_to_user(dst, src, size);
9416+ return size;
9417 }
9418
9419 static __always_inline __must_check
9420-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9421+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9422 {
9423- int ret = 0;
9424+ int sz = __compiletime_object_size(dst);
9425+ unsigned ret = 0;
9426
9427 might_fault();
9428- if (!__builtin_constant_p(size))
9429- return copy_user_generic(dst, (__force void *)src, size);
9430+
9431+ pax_track_stack();
9432+
9433+ if ((int)size < 0)
9434+ return size;
9435+
9436+#ifdef CONFIG_PAX_MEMORY_UDEREF
9437+ if (!__access_ok(VERIFY_READ, src, size))
9438+ return size;
9439+#endif
9440+
9441+ if (unlikely(sz != -1 && sz < size)) {
9442+#ifdef CONFIG_DEBUG_VM
9443+ WARN(1, "Buffer overflow detected!\n");
9444+#endif
9445+ return size;
9446+ }
9447+
9448+ if (!__builtin_constant_p(size)) {
9449+ check_object_size(dst, size, false);
9450+
9451+#ifdef CONFIG_PAX_MEMORY_UDEREF
9452+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9453+ src += PAX_USER_SHADOW_BASE;
9454+#endif
9455+
9456+ return copy_user_generic(dst, (__force const void *)src, size);
9457+ }
9458 switch (size) {
9459- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9460+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9461 ret, "b", "b", "=q", 1);
9462 return ret;
9463- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9464+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9465 ret, "w", "w", "=r", 2);
9466 return ret;
9467- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9468+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9469 ret, "l", "k", "=r", 4);
9470 return ret;
9471- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9472+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9473 ret, "q", "", "=r", 8);
9474 return ret;
9475 case 10:
9476- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9477+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9478 ret, "q", "", "=r", 10);
9479 if (unlikely(ret))
9480 return ret;
9481 __get_user_asm(*(u16 *)(8 + (char *)dst),
9482- (u16 __user *)(8 + (char __user *)src),
9483+ (const u16 __user *)(8 + (const char __user *)src),
9484 ret, "w", "w", "=r", 2);
9485 return ret;
9486 case 16:
9487- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9488+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9489 ret, "q", "", "=r", 16);
9490 if (unlikely(ret))
9491 return ret;
9492 __get_user_asm(*(u64 *)(8 + (char *)dst),
9493- (u64 __user *)(8 + (char __user *)src),
9494+ (const u64 __user *)(8 + (const char __user *)src),
9495 ret, "q", "", "=r", 8);
9496 return ret;
9497 default:
9498- return copy_user_generic(dst, (__force void *)src, size);
9499+
9500+#ifdef CONFIG_PAX_MEMORY_UDEREF
9501+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9502+ src += PAX_USER_SHADOW_BASE;
9503+#endif
9504+
9505+ return copy_user_generic(dst, (__force const void *)src, size);
9506 }
9507 }
9508
9509 static __always_inline __must_check
9510-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9511+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9512 {
9513- int ret = 0;
9514+ int sz = __compiletime_object_size(src);
9515+ unsigned ret = 0;
9516
9517 might_fault();
9518- if (!__builtin_constant_p(size))
9519+
9520+ pax_track_stack();
9521+
9522+ if ((int)size < 0)
9523+ return size;
9524+
9525+#ifdef CONFIG_PAX_MEMORY_UDEREF
9526+ if (!__access_ok(VERIFY_WRITE, dst, size))
9527+ return size;
9528+#endif
9529+
9530+ if (unlikely(sz != -1 && sz < size)) {
9531+#ifdef CONFIG_DEBUG_VM
9532+ WARN(1, "Buffer overflow detected!\n");
9533+#endif
9534+ return size;
9535+ }
9536+
9537+ if (!__builtin_constant_p(size)) {
9538+ check_object_size(src, size, true);
9539+
9540+#ifdef CONFIG_PAX_MEMORY_UDEREF
9541+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9542+ dst += PAX_USER_SHADOW_BASE;
9543+#endif
9544+
9545 return copy_user_generic((__force void *)dst, src, size);
9546+ }
9547 switch (size) {
9548- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9549+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9550 ret, "b", "b", "iq", 1);
9551 return ret;
9552- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9553+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9554 ret, "w", "w", "ir", 2);
9555 return ret;
9556- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9557+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9558 ret, "l", "k", "ir", 4);
9559 return ret;
9560- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9561+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9562 ret, "q", "", "er", 8);
9563 return ret;
9564 case 10:
9565- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9566+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9567 ret, "q", "", "er", 10);
9568 if (unlikely(ret))
9569 return ret;
9570 asm("":::"memory");
9571- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9572+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9573 ret, "w", "w", "ir", 2);
9574 return ret;
9575 case 16:
9576- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9577+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9578 ret, "q", "", "er", 16);
9579 if (unlikely(ret))
9580 return ret;
9581 asm("":::"memory");
9582- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9583+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9584 ret, "q", "", "er", 8);
9585 return ret;
9586 default:
9587+
9588+#ifdef CONFIG_PAX_MEMORY_UDEREF
9589+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9590+ dst += PAX_USER_SHADOW_BASE;
9591+#endif
9592+
9593 return copy_user_generic((__force void *)dst, src, size);
9594 }
9595 }
9596
9597 static __always_inline __must_check
9598-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9599+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9600 {
9601- int ret = 0;
9602+ unsigned ret = 0;
9603
9604 might_fault();
9605- if (!__builtin_constant_p(size))
9606+
9607+ if ((int)size < 0)
9608+ return size;
9609+
9610+#ifdef CONFIG_PAX_MEMORY_UDEREF
9611+ if (!__access_ok(VERIFY_READ, src, size))
9612+ return size;
9613+ if (!__access_ok(VERIFY_WRITE, dst, size))
9614+ return size;
9615+#endif
9616+
9617+ if (!__builtin_constant_p(size)) {
9618+
9619+#ifdef CONFIG_PAX_MEMORY_UDEREF
9620+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9621+ src += PAX_USER_SHADOW_BASE;
9622+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9623+ dst += PAX_USER_SHADOW_BASE;
9624+#endif
9625+
9626 return copy_user_generic((__force void *)dst,
9627- (__force void *)src, size);
9628+ (__force const void *)src, size);
9629+ }
9630 switch (size) {
9631 case 1: {
9632 u8 tmp;
9633- __get_user_asm(tmp, (u8 __user *)src,
9634+ __get_user_asm(tmp, (const u8 __user *)src,
9635 ret, "b", "b", "=q", 1);
9636 if (likely(!ret))
9637 __put_user_asm(tmp, (u8 __user *)dst,
9638@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9639 }
9640 case 2: {
9641 u16 tmp;
9642- __get_user_asm(tmp, (u16 __user *)src,
9643+ __get_user_asm(tmp, (const u16 __user *)src,
9644 ret, "w", "w", "=r", 2);
9645 if (likely(!ret))
9646 __put_user_asm(tmp, (u16 __user *)dst,
9647@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9648
9649 case 4: {
9650 u32 tmp;
9651- __get_user_asm(tmp, (u32 __user *)src,
9652+ __get_user_asm(tmp, (const u32 __user *)src,
9653 ret, "l", "k", "=r", 4);
9654 if (likely(!ret))
9655 __put_user_asm(tmp, (u32 __user *)dst,
9656@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9657 }
9658 case 8: {
9659 u64 tmp;
9660- __get_user_asm(tmp, (u64 __user *)src,
9661+ __get_user_asm(tmp, (const u64 __user *)src,
9662 ret, "q", "", "=r", 8);
9663 if (likely(!ret))
9664 __put_user_asm(tmp, (u64 __user *)dst,
9665@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9666 return ret;
9667 }
9668 default:
9669+
9670+#ifdef CONFIG_PAX_MEMORY_UDEREF
9671+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9672+ src += PAX_USER_SHADOW_BASE;
9673+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9674+ dst += PAX_USER_SHADOW_BASE;
9675+#endif
9676+
9677 return copy_user_generic((__force void *)dst,
9678- (__force void *)src, size);
9679+ (__force const void *)src, size);
9680 }
9681 }
9682
9683@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9684 static __must_check __always_inline int
9685 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9686 {
9687+ pax_track_stack();
9688+
9689+ if ((int)size < 0)
9690+ return size;
9691+
9692+#ifdef CONFIG_PAX_MEMORY_UDEREF
9693+ if (!__access_ok(VERIFY_READ, src, size))
9694+ return size;
9695+
9696+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9697+ src += PAX_USER_SHADOW_BASE;
9698+#endif
9699+
9700 return copy_user_generic(dst, (__force const void *)src, size);
9701 }
9702
9703-static __must_check __always_inline int
9704+static __must_check __always_inline unsigned long
9705 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9706 {
9707+ if ((int)size < 0)
9708+ return size;
9709+
9710+#ifdef CONFIG_PAX_MEMORY_UDEREF
9711+ if (!__access_ok(VERIFY_WRITE, dst, size))
9712+ return size;
9713+
9714+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9715+ dst += PAX_USER_SHADOW_BASE;
9716+#endif
9717+
9718 return copy_user_generic((__force void *)dst, src, size);
9719 }
9720
9721-extern long __copy_user_nocache(void *dst, const void __user *src,
9722+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9723 unsigned size, int zerorest);
9724
9725-static inline int
9726-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9727+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9728 {
9729 might_sleep();
9730+
9731+ if ((int)size < 0)
9732+ return size;
9733+
9734+#ifdef CONFIG_PAX_MEMORY_UDEREF
9735+ if (!__access_ok(VERIFY_READ, src, size))
9736+ return size;
9737+#endif
9738+
9739 return __copy_user_nocache(dst, src, size, 1);
9740 }
9741
9742-static inline int
9743-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9744+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9745 unsigned size)
9746 {
9747+ if ((int)size < 0)
9748+ return size;
9749+
9750+#ifdef CONFIG_PAX_MEMORY_UDEREF
9751+ if (!__access_ok(VERIFY_READ, src, size))
9752+ return size;
9753+#endif
9754+
9755 return __copy_user_nocache(dst, src, size, 0);
9756 }
9757
9758-unsigned long
9759+extern unsigned long
9760 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9761
9762 #endif /* _ASM_X86_UACCESS_64_H */
9763diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9764--- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9765+++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9766@@ -7,12 +7,15 @@
9767 #include <linux/compiler.h>
9768 #include <linux/thread_info.h>
9769 #include <linux/string.h>
9770+#include <linux/sched.h>
9771 #include <asm/asm.h>
9772 #include <asm/page.h>
9773
9774 #define VERIFY_READ 0
9775 #define VERIFY_WRITE 1
9776
9777+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9778+
9779 /*
9780 * The fs value determines whether argument validity checking should be
9781 * performed or not. If get_fs() == USER_DS, checking is performed, with
9782@@ -28,7 +31,12 @@
9783
9784 #define get_ds() (KERNEL_DS)
9785 #define get_fs() (current_thread_info()->addr_limit)
9786+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9787+void __set_fs(mm_segment_t x);
9788+void set_fs(mm_segment_t x);
9789+#else
9790 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9791+#endif
9792
9793 #define segment_eq(a, b) ((a).seg == (b).seg)
9794
9795@@ -76,7 +84,33 @@
9796 * checks that the pointer is in the user space range - after calling
9797 * this function, memory access functions may still return -EFAULT.
9798 */
9799-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9800+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9801+#define access_ok(type, addr, size) \
9802+({ \
9803+ long __size = size; \
9804+ unsigned long __addr = (unsigned long)addr; \
9805+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9806+ unsigned long __end_ao = __addr + __size - 1; \
9807+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9808+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9809+ while(__addr_ao <= __end_ao) { \
9810+ char __c_ao; \
9811+ __addr_ao += PAGE_SIZE; \
9812+ if (__size > PAGE_SIZE) \
9813+ cond_resched(); \
9814+ if (__get_user(__c_ao, (char __user *)__addr)) \
9815+ break; \
9816+ if (type != VERIFY_WRITE) { \
9817+ __addr = __addr_ao; \
9818+ continue; \
9819+ } \
9820+ if (__put_user(__c_ao, (char __user *)__addr)) \
9821+ break; \
9822+ __addr = __addr_ao; \
9823+ } \
9824+ } \
9825+ __ret_ao; \
9826+})
9827
9828 /*
9829 * The exception table consists of pairs of addresses: the first is the
9830@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9831 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9832 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9833
9834-
9835+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9836+#define __copyuser_seg "gs;"
9837+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9838+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9839+#else
9840+#define __copyuser_seg
9841+#define __COPYUSER_SET_ES
9842+#define __COPYUSER_RESTORE_ES
9843+#endif
9844
9845 #ifdef CONFIG_X86_32
9846 #define __put_user_asm_u64(x, addr, err, errret) \
9847- asm volatile("1: movl %%eax,0(%2)\n" \
9848- "2: movl %%edx,4(%2)\n" \
9849+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9850+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9851 "3:\n" \
9852 ".section .fixup,\"ax\"\n" \
9853 "4: movl %3,%0\n" \
9854@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9855 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9856
9857 #define __put_user_asm_ex_u64(x, addr) \
9858- asm volatile("1: movl %%eax,0(%1)\n" \
9859- "2: movl %%edx,4(%1)\n" \
9860+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9861+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9862 "3:\n" \
9863 _ASM_EXTABLE(1b, 2b - 1b) \
9864 _ASM_EXTABLE(2b, 3b - 2b) \
9865@@ -373,7 +415,7 @@ do { \
9866 } while (0)
9867
9868 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9869- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9870+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9871 "2:\n" \
9872 ".section .fixup,\"ax\"\n" \
9873 "3: mov %3,%0\n" \
9874@@ -381,7 +423,7 @@ do { \
9875 " jmp 2b\n" \
9876 ".previous\n" \
9877 _ASM_EXTABLE(1b, 3b) \
9878- : "=r" (err), ltype(x) \
9879+ : "=r" (err), ltype (x) \
9880 : "m" (__m(addr)), "i" (errret), "0" (err))
9881
9882 #define __get_user_size_ex(x, ptr, size) \
9883@@ -406,7 +448,7 @@ do { \
9884 } while (0)
9885
9886 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9887- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9888+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9889 "2:\n" \
9890 _ASM_EXTABLE(1b, 2b - 1b) \
9891 : ltype(x) : "m" (__m(addr)))
9892@@ -423,13 +465,24 @@ do { \
9893 int __gu_err; \
9894 unsigned long __gu_val; \
9895 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9896- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9897+ (x) = (__typeof__(*(ptr)))__gu_val; \
9898 __gu_err; \
9899 })
9900
9901 /* FIXME: this hack is definitely wrong -AK */
9902 struct __large_struct { unsigned long buf[100]; };
9903-#define __m(x) (*(struct __large_struct __user *)(x))
9904+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9905+#define ____m(x) \
9906+({ \
9907+ unsigned long ____x = (unsigned long)(x); \
9908+ if (____x < PAX_USER_SHADOW_BASE) \
9909+ ____x += PAX_USER_SHADOW_BASE; \
9910+ (void __user *)____x; \
9911+})
9912+#else
9913+#define ____m(x) (x)
9914+#endif
9915+#define __m(x) (*(struct __large_struct __user *)____m(x))
9916
9917 /*
9918 * Tell gcc we read from memory instead of writing: this is because
9919@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9920 * aliasing issues.
9921 */
9922 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9923- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9924+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9925 "2:\n" \
9926 ".section .fixup,\"ax\"\n" \
9927 "3: mov %3,%0\n" \
9928@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9929 ".previous\n" \
9930 _ASM_EXTABLE(1b, 3b) \
9931 : "=r"(err) \
9932- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9933+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9934
9935 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9936- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9937+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9938 "2:\n" \
9939 _ASM_EXTABLE(1b, 2b - 1b) \
9940 : : ltype(x), "m" (__m(addr)))
9941@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9942 * On error, the variable @x is set to zero.
9943 */
9944
9945+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9946+#define __get_user(x, ptr) get_user((x), (ptr))
9947+#else
9948 #define __get_user(x, ptr) \
9949 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9950+#endif
9951
9952 /**
9953 * __put_user: - Write a simple value into user space, with less checking.
9954@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9955 * Returns zero on success, or -EFAULT on error.
9956 */
9957
9958+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9959+#define __put_user(x, ptr) put_user((x), (ptr))
9960+#else
9961 #define __put_user(x, ptr) \
9962 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9963+#endif
9964
9965 #define __get_user_unaligned __get_user
9966 #define __put_user_unaligned __put_user
9967@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9968 #define get_user_ex(x, ptr) do { \
9969 unsigned long __gue_val; \
9970 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9971- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9972+ (x) = (__typeof__(*(ptr)))__gue_val; \
9973 } while (0)
9974
9975 #ifdef CONFIG_X86_WP_WORKS_OK
9976diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9977--- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9978+++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9979@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9980 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9981 void (*find_smp_config)(void);
9982 void (*get_smp_config)(unsigned int early);
9983-};
9984+} __no_const;
9985
9986 /**
9987 * struct x86_init_resources - platform specific resource related ops
9988@@ -42,7 +42,7 @@ struct x86_init_resources {
9989 void (*probe_roms)(void);
9990 void (*reserve_resources)(void);
9991 char *(*memory_setup)(void);
9992-};
9993+} __no_const;
9994
9995 /**
9996 * struct x86_init_irqs - platform specific interrupt setup
9997@@ -55,7 +55,7 @@ struct x86_init_irqs {
9998 void (*pre_vector_init)(void);
9999 void (*intr_init)(void);
10000 void (*trap_init)(void);
10001-};
10002+} __no_const;
10003
10004 /**
10005 * struct x86_init_oem - oem platform specific customizing functions
10006@@ -65,7 +65,7 @@ struct x86_init_irqs {
10007 struct x86_init_oem {
10008 void (*arch_setup)(void);
10009 void (*banner)(void);
10010-};
10011+} __no_const;
10012
10013 /**
10014 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10015@@ -76,7 +76,7 @@ struct x86_init_oem {
10016 */
10017 struct x86_init_mapping {
10018 void (*pagetable_reserve)(u64 start, u64 end);
10019-};
10020+} __no_const;
10021
10022 /**
10023 * struct x86_init_paging - platform specific paging functions
10024@@ -86,7 +86,7 @@ struct x86_init_mapping {
10025 struct x86_init_paging {
10026 void (*pagetable_setup_start)(pgd_t *base);
10027 void (*pagetable_setup_done)(pgd_t *base);
10028-};
10029+} __no_const;
10030
10031 /**
10032 * struct x86_init_timers - platform specific timer setup
10033@@ -101,7 +101,7 @@ struct x86_init_timers {
10034 void (*tsc_pre_init)(void);
10035 void (*timer_init)(void);
10036 void (*wallclock_init)(void);
10037-};
10038+} __no_const;
10039
10040 /**
10041 * struct x86_init_iommu - platform specific iommu setup
10042@@ -109,7 +109,7 @@ struct x86_init_timers {
10043 */
10044 struct x86_init_iommu {
10045 int (*iommu_init)(void);
10046-};
10047+} __no_const;
10048
10049 /**
10050 * struct x86_init_pci - platform specific pci init functions
10051@@ -123,7 +123,7 @@ struct x86_init_pci {
10052 int (*init)(void);
10053 void (*init_irq)(void);
10054 void (*fixup_irqs)(void);
10055-};
10056+} __no_const;
10057
10058 /**
10059 * struct x86_init_ops - functions for platform specific setup
10060@@ -139,7 +139,7 @@ struct x86_init_ops {
10061 struct x86_init_timers timers;
10062 struct x86_init_iommu iommu;
10063 struct x86_init_pci pci;
10064-};
10065+} __no_const;
10066
10067 /**
10068 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10069@@ -147,7 +147,7 @@ struct x86_init_ops {
10070 */
10071 struct x86_cpuinit_ops {
10072 void (*setup_percpu_clockev)(void);
10073-};
10074+} __no_const;
10075
10076 /**
10077 * struct x86_platform_ops - platform specific runtime functions
10078@@ -166,7 +166,7 @@ struct x86_platform_ops {
10079 bool (*is_untracked_pat_range)(u64 start, u64 end);
10080 void (*nmi_init)(void);
10081 int (*i8042_detect)(void);
10082-};
10083+} __no_const;
10084
10085 struct pci_dev;
10086
10087@@ -174,7 +174,7 @@ struct x86_msi_ops {
10088 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10089 void (*teardown_msi_irq)(unsigned int irq);
10090 void (*teardown_msi_irqs)(struct pci_dev *dev);
10091-};
10092+} __no_const;
10093
10094 extern struct x86_init_ops x86_init;
10095 extern struct x86_cpuinit_ops x86_cpuinit;
10096diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10097--- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10098+++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10099@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10100 {
10101 int err;
10102
10103+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10104+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10105+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10106+#endif
10107+
10108 /*
10109 * Clear the xsave header first, so that reserved fields are
10110 * initialized to zero.
10111@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10112 u32 lmask = mask;
10113 u32 hmask = mask >> 32;
10114
10115+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10116+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10117+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10118+#endif
10119+
10120 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10121 "2:\n"
10122 ".section .fixup,\"ax\"\n"
10123diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10124--- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10125+++ linux-3.0.4/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10126@@ -229,7 +229,7 @@ config X86_HT
10127
10128 config X86_32_LAZY_GS
10129 def_bool y
10130- depends on X86_32 && !CC_STACKPROTECTOR
10131+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10132
10133 config ARCH_HWEIGHT_CFLAGS
10134 string
10135@@ -1018,7 +1018,7 @@ choice
10136
10137 config NOHIGHMEM
10138 bool "off"
10139- depends on !X86_NUMAQ
10140+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10141 ---help---
10142 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10143 However, the address space of 32-bit x86 processors is only 4
10144@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10145
10146 config HIGHMEM4G
10147 bool "4GB"
10148- depends on !X86_NUMAQ
10149+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10150 ---help---
10151 Select this if you have a 32-bit processor and between 1 and 4
10152 gigabytes of physical RAM.
10153@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10154 hex
10155 default 0xB0000000 if VMSPLIT_3G_OPT
10156 default 0x80000000 if VMSPLIT_2G
10157- default 0x78000000 if VMSPLIT_2G_OPT
10158+ default 0x70000000 if VMSPLIT_2G_OPT
10159 default 0x40000000 if VMSPLIT_1G
10160 default 0xC0000000
10161 depends on X86_32
10162@@ -1483,6 +1483,7 @@ config SECCOMP
10163
10164 config CC_STACKPROTECTOR
10165 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10166+ depends on X86_64 || !PAX_MEMORY_UDEREF
10167 ---help---
10168 This option turns on the -fstack-protector GCC feature. This
10169 feature puts, at the beginning of functions, a canary value on
10170@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10171 config PHYSICAL_START
10172 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10173 default "0x1000000"
10174+ range 0x400000 0x40000000
10175 ---help---
10176 This gives the physical address where the kernel is loaded.
10177
10178@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10179 config PHYSICAL_ALIGN
10180 hex "Alignment value to which kernel should be aligned" if X86_32
10181 default "0x1000000"
10182+ range 0x400000 0x1000000 if PAX_KERNEXEC
10183 range 0x2000 0x1000000
10184 ---help---
10185 This value puts the alignment restrictions on physical address
10186@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10187 Say N if you want to disable CPU hotplug.
10188
10189 config COMPAT_VDSO
10190- def_bool y
10191+ def_bool n
10192 prompt "Compat VDSO support"
10193 depends on X86_32 || IA32_EMULATION
10194+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10195 ---help---
10196 Map the 32-bit VDSO to the predictable old-style address too.
10197
10198diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10199--- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10200+++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10201@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10202
10203 config X86_F00F_BUG
10204 def_bool y
10205- depends on M586MMX || M586TSC || M586 || M486 || M386
10206+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10207
10208 config X86_INVD_BUG
10209 def_bool y
10210@@ -362,7 +362,7 @@ config X86_POPAD_OK
10211
10212 config X86_ALIGNMENT_16
10213 def_bool y
10214- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10215+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10216
10217 config X86_INTEL_USERCOPY
10218 def_bool y
10219@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10220 # generates cmov.
10221 config X86_CMOV
10222 def_bool y
10223- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10224+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10225
10226 config X86_MINIMUM_CPU_FAMILY
10227 int
10228diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10229--- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10230+++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10231@@ -81,7 +81,7 @@ config X86_PTDUMP
10232 config DEBUG_RODATA
10233 bool "Write protect kernel read-only data structures"
10234 default y
10235- depends on DEBUG_KERNEL
10236+ depends on DEBUG_KERNEL && BROKEN
10237 ---help---
10238 Mark the kernel read-only data as write-protected in the pagetables,
10239 in order to catch accidental (and incorrect) writes to such const
10240@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10241
10242 config DEBUG_SET_MODULE_RONX
10243 bool "Set loadable kernel module data as NX and text as RO"
10244- depends on MODULES
10245+ depends on MODULES && BROKEN
10246 ---help---
10247 This option helps catch unintended modifications to loadable
10248 kernel module's text and read-only data. It also prevents execution
10249diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10250--- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10251+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10252@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10253 $(call cc-option, -fno-stack-protector) \
10254 $(call cc-option, -mpreferred-stack-boundary=2)
10255 KBUILD_CFLAGS += $(call cc-option, -m32)
10256+ifdef CONSTIFY_PLUGIN
10257+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10258+endif
10259 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10260 GCOV_PROFILE := n
10261
10262diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10263--- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10264+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10265@@ -108,6 +108,9 @@ wakeup_code:
10266 /* Do any other stuff... */
10267
10268 #ifndef CONFIG_64BIT
10269+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10270+ call verify_cpu
10271+
10272 /* This could also be done in C code... */
10273 movl pmode_cr3, %eax
10274 movl %eax, %cr3
10275@@ -131,6 +134,7 @@ wakeup_code:
10276 movl pmode_cr0, %eax
10277 movl %eax, %cr0
10278 jmp pmode_return
10279+# include "../../verify_cpu.S"
10280 #else
10281 pushw $0
10282 pushw trampoline_segment
10283diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10284--- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10285+++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10286@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10287 header->trampoline_segment = trampoline_address() >> 4;
10288 #ifdef CONFIG_SMP
10289 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10290+
10291+ pax_open_kernel();
10292 early_gdt_descr.address =
10293 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10294+ pax_close_kernel();
10295+
10296 initial_gs = per_cpu_offset(smp_processor_id());
10297 #endif
10298 initial_code = (unsigned long)wakeup_long64;
10299diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10300--- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10301+++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10302@@ -30,13 +30,11 @@ wakeup_pmode_return:
10303 # and restore the stack ... but you need gdt for this to work
10304 movl saved_context_esp, %esp
10305
10306- movl %cs:saved_magic, %eax
10307- cmpl $0x12345678, %eax
10308+ cmpl $0x12345678, saved_magic
10309 jne bogus_magic
10310
10311 # jump to place where we left off
10312- movl saved_eip, %eax
10313- jmp *%eax
10314+ jmp *(saved_eip)
10315
10316 bogus_magic:
10317 jmp bogus_magic
10318diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10319--- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10320+++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10321@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10322 if (!*poff || ptr < text || ptr >= text_end)
10323 continue;
10324 /* turn DS segment override prefix into lock prefix */
10325- if (*ptr == 0x3e)
10326+ if (*ktla_ktva(ptr) == 0x3e)
10327 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10328 };
10329 mutex_unlock(&text_mutex);
10330@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10331 if (!*poff || ptr < text || ptr >= text_end)
10332 continue;
10333 /* turn lock prefix into DS segment override prefix */
10334- if (*ptr == 0xf0)
10335+ if (*ktla_ktva(ptr) == 0xf0)
10336 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10337 };
10338 mutex_unlock(&text_mutex);
10339@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10340
10341 BUG_ON(p->len > MAX_PATCH_LEN);
10342 /* prep the buffer with the original instructions */
10343- memcpy(insnbuf, p->instr, p->len);
10344+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10345 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10346 (unsigned long)p->instr, p->len);
10347
10348@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10349 if (smp_alt_once)
10350 free_init_pages("SMP alternatives",
10351 (unsigned long)__smp_locks,
10352- (unsigned long)__smp_locks_end);
10353+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10354
10355 restart_nmi();
10356 }
10357@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10358 * instructions. And on the local CPU you need to be protected again NMI or MCE
10359 * handlers seeing an inconsistent instruction while you patch.
10360 */
10361-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10362+void *__kprobes text_poke_early(void *addr, const void *opcode,
10363 size_t len)
10364 {
10365 unsigned long flags;
10366 local_irq_save(flags);
10367- memcpy(addr, opcode, len);
10368+
10369+ pax_open_kernel();
10370+ memcpy(ktla_ktva(addr), opcode, len);
10371 sync_core();
10372+ pax_close_kernel();
10373+
10374 local_irq_restore(flags);
10375 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10376 that causes hangs on some VIA CPUs. */
10377@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10378 */
10379 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10380 {
10381- unsigned long flags;
10382- char *vaddr;
10383+ unsigned char *vaddr = ktla_ktva(addr);
10384 struct page *pages[2];
10385- int i;
10386+ size_t i;
10387
10388 if (!core_kernel_text((unsigned long)addr)) {
10389- pages[0] = vmalloc_to_page(addr);
10390- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10391+ pages[0] = vmalloc_to_page(vaddr);
10392+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10393 } else {
10394- pages[0] = virt_to_page(addr);
10395+ pages[0] = virt_to_page(vaddr);
10396 WARN_ON(!PageReserved(pages[0]));
10397- pages[1] = virt_to_page(addr + PAGE_SIZE);
10398+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10399 }
10400 BUG_ON(!pages[0]);
10401- local_irq_save(flags);
10402- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10403- if (pages[1])
10404- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10405- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10406- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10407- clear_fixmap(FIX_TEXT_POKE0);
10408- if (pages[1])
10409- clear_fixmap(FIX_TEXT_POKE1);
10410- local_flush_tlb();
10411- sync_core();
10412- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10413- that causes hangs on some VIA CPUs. */
10414+ text_poke_early(addr, opcode, len);
10415 for (i = 0; i < len; i++)
10416- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10417- local_irq_restore(flags);
10418+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10419 return addr;
10420 }
10421
10422diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10423--- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10424+++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10425@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10426 /*
10427 * Debug level, exported for io_apic.c
10428 */
10429-unsigned int apic_verbosity;
10430+int apic_verbosity;
10431
10432 int pic_mode;
10433
10434@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10435 apic_write(APIC_ESR, 0);
10436 v1 = apic_read(APIC_ESR);
10437 ack_APIC_irq();
10438- atomic_inc(&irq_err_count);
10439+ atomic_inc_unchecked(&irq_err_count);
10440
10441 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10442 smp_processor_id(), v0 , v1);
10443@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10444 u16 *bios_cpu_apicid;
10445 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10446
10447+ pax_track_stack();
10448+
10449 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10450 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10451
10452diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10453--- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10454+++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10455@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10456 }
10457 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10458
10459-void lock_vector_lock(void)
10460+void lock_vector_lock(void) __acquires(vector_lock)
10461 {
10462 /* Used to the online set of cpus does not change
10463 * during assign_irq_vector.
10464@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10465 raw_spin_lock(&vector_lock);
10466 }
10467
10468-void unlock_vector_lock(void)
10469+void unlock_vector_lock(void) __releases(vector_lock)
10470 {
10471 raw_spin_unlock(&vector_lock);
10472 }
10473@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10474 ack_APIC_irq();
10475 }
10476
10477-atomic_t irq_mis_count;
10478+atomic_unchecked_t irq_mis_count;
10479
10480 /*
10481 * IO-APIC versions below 0x20 don't support EOI register.
10482@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10483 * at the cpu.
10484 */
10485 if (!(v & (1 << (i & 0x1f)))) {
10486- atomic_inc(&irq_mis_count);
10487+ atomic_inc_unchecked(&irq_mis_count);
10488
10489 eoi_ioapic_irq(irq, cfg);
10490 }
10491diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10492--- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10493+++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10494@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10495 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10496 * even though they are called in protected mode.
10497 */
10498-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10499+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10500 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10501
10502 static const char driver_version[] = "1.16ac"; /* no spaces */
10503@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10504 BUG_ON(cpu != 0);
10505 gdt = get_cpu_gdt_table(cpu);
10506 save_desc_40 = gdt[0x40 / 8];
10507+
10508+ pax_open_kernel();
10509 gdt[0x40 / 8] = bad_bios_desc;
10510+ pax_close_kernel();
10511
10512 apm_irq_save(flags);
10513 APM_DO_SAVE_SEGS;
10514@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10515 &call->esi);
10516 APM_DO_RESTORE_SEGS;
10517 apm_irq_restore(flags);
10518+
10519+ pax_open_kernel();
10520 gdt[0x40 / 8] = save_desc_40;
10521+ pax_close_kernel();
10522+
10523 put_cpu();
10524
10525 return call->eax & 0xff;
10526@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10527 BUG_ON(cpu != 0);
10528 gdt = get_cpu_gdt_table(cpu);
10529 save_desc_40 = gdt[0x40 / 8];
10530+
10531+ pax_open_kernel();
10532 gdt[0x40 / 8] = bad_bios_desc;
10533+ pax_close_kernel();
10534
10535 apm_irq_save(flags);
10536 APM_DO_SAVE_SEGS;
10537@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10538 &call->eax);
10539 APM_DO_RESTORE_SEGS;
10540 apm_irq_restore(flags);
10541+
10542+ pax_open_kernel();
10543 gdt[0x40 / 8] = save_desc_40;
10544+ pax_close_kernel();
10545+
10546 put_cpu();
10547 return error;
10548 }
10549@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10550 * code to that CPU.
10551 */
10552 gdt = get_cpu_gdt_table(0);
10553+
10554+ pax_open_kernel();
10555 set_desc_base(&gdt[APM_CS >> 3],
10556 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10557 set_desc_base(&gdt[APM_CS_16 >> 3],
10558 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10559 set_desc_base(&gdt[APM_DS >> 3],
10560 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10561+ pax_close_kernel();
10562
10563 proc_create("apm", 0, NULL, &apm_file_ops);
10564
10565diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10566--- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10567+++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10568@@ -69,6 +69,7 @@ int main(void)
10569 BLANK();
10570 #undef ENTRY
10571
10572+ DEFINE(TSS_size, sizeof(struct tss_struct));
10573 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10574 BLANK();
10575
10576diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10577--- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10578+++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10579@@ -33,6 +33,8 @@ void common(void) {
10580 OFFSET(TI_status, thread_info, status);
10581 OFFSET(TI_addr_limit, thread_info, addr_limit);
10582 OFFSET(TI_preempt_count, thread_info, preempt_count);
10583+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10584+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10585
10586 BLANK();
10587 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10588@@ -53,8 +55,26 @@ void common(void) {
10589 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10590 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10591 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10592+
10593+#ifdef CONFIG_PAX_KERNEXEC
10594+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10595+#endif
10596+
10597+#ifdef CONFIG_PAX_MEMORY_UDEREF
10598+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10599+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10600+#ifdef CONFIG_X86_64
10601+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10602+#endif
10603 #endif
10604
10605+#endif
10606+
10607+ BLANK();
10608+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10609+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10610+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10611+
10612 #ifdef CONFIG_XEN
10613 BLANK();
10614 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10615diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10616--- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10617+++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10618@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10619 unsigned int size)
10620 {
10621 /* AMD errata T13 (order #21922) */
10622- if ((c->x86 == 6)) {
10623+ if (c->x86 == 6) {
10624 /* Duron Rev A0 */
10625 if (c->x86_model == 3 && c->x86_mask == 0)
10626 size = 64;
10627diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10628--- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10629+++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10630@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10631
10632 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10633
10634-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10635-#ifdef CONFIG_X86_64
10636- /*
10637- * We need valid kernel segments for data and code in long mode too
10638- * IRET will check the segment types kkeil 2000/10/28
10639- * Also sysret mandates a special GDT layout
10640- *
10641- * TLS descriptors are currently at a different place compared to i386.
10642- * Hopefully nobody expects them at a fixed place (Wine?)
10643- */
10644- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10645- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10646- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10647- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10648- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10649- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10650-#else
10651- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10652- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10653- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10654- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10655- /*
10656- * Segments used for calling PnP BIOS have byte granularity.
10657- * They code segments and data segments have fixed 64k limits,
10658- * the transfer segment sizes are set at run time.
10659- */
10660- /* 32-bit code */
10661- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10662- /* 16-bit code */
10663- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10664- /* 16-bit data */
10665- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10666- /* 16-bit data */
10667- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10668- /* 16-bit data */
10669- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10670- /*
10671- * The APM segments have byte granularity and their bases
10672- * are set at run time. All have 64k limits.
10673- */
10674- /* 32-bit code */
10675- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10676- /* 16-bit code */
10677- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10678- /* data */
10679- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10680-
10681- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10682- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10683- GDT_STACK_CANARY_INIT
10684-#endif
10685-} };
10686-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10687-
10688 static int __init x86_xsave_setup(char *s)
10689 {
10690 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10691@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10692 {
10693 struct desc_ptr gdt_descr;
10694
10695- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10696+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10697 gdt_descr.size = GDT_SIZE - 1;
10698 load_gdt(&gdt_descr);
10699 /* Reload the per-cpu base */
10700@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10701 /* Filter out anything that depends on CPUID levels we don't have */
10702 filter_cpuid_features(c, true);
10703
10704+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10705+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10706+#endif
10707+
10708 /* If the model name is still unset, do table lookup. */
10709 if (!c->x86_model_id[0]) {
10710 const char *p;
10711@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10712 }
10713 __setup("clearcpuid=", setup_disablecpuid);
10714
10715+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10716+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10717+
10718 #ifdef CONFIG_X86_64
10719 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10720
10721@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10722 EXPORT_PER_CPU_SYMBOL(current_task);
10723
10724 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10725- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10726+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10727 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10728
10729 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10730@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10731 {
10732 memset(regs, 0, sizeof(struct pt_regs));
10733 regs->fs = __KERNEL_PERCPU;
10734- regs->gs = __KERNEL_STACK_CANARY;
10735+ savesegment(gs, regs->gs);
10736
10737 return regs;
10738 }
10739@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10740 int i;
10741
10742 cpu = stack_smp_processor_id();
10743- t = &per_cpu(init_tss, cpu);
10744+ t = init_tss + cpu;
10745 oist = &per_cpu(orig_ist, cpu);
10746
10747 #ifdef CONFIG_NUMA
10748@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10749 switch_to_new_gdt(cpu);
10750 loadsegment(fs, 0);
10751
10752- load_idt((const struct desc_ptr *)&idt_descr);
10753+ load_idt(&idt_descr);
10754
10755 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10756 syscall_init();
10757@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10758 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10759 barrier();
10760
10761- x86_configure_nx();
10762 if (cpu != 0)
10763 enable_x2apic();
10764
10765@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10766 {
10767 int cpu = smp_processor_id();
10768 struct task_struct *curr = current;
10769- struct tss_struct *t = &per_cpu(init_tss, cpu);
10770+ struct tss_struct *t = init_tss + cpu;
10771 struct thread_struct *thread = &curr->thread;
10772
10773 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10774diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10775--- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10776+++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10777@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10778 * Update the IDT descriptor and reload the IDT so that
10779 * it uses the read-only mapped virtual address.
10780 */
10781- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10782+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10783 load_idt(&idt_descr);
10784 }
10785 #endif
10786diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10787--- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10788+++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10789@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10790 CFLAGS_REMOVE_perf_event.o = -pg
10791 endif
10792
10793-# Make sure load_percpu_segment has no stackprotector
10794-nostackp := $(call cc-option, -fno-stack-protector)
10795-CFLAGS_common.o := $(nostackp)
10796-
10797 obj-y := intel_cacheinfo.o scattered.o topology.o
10798 obj-y += proc.o capflags.o powerflags.o common.o
10799 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10800diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10801--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10802+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10803@@ -46,6 +46,7 @@
10804 #include <asm/ipi.h>
10805 #include <asm/mce.h>
10806 #include <asm/msr.h>
10807+#include <asm/local.h>
10808
10809 #include "mce-internal.h"
10810
10811@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10812 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10813 m->cs, m->ip);
10814
10815- if (m->cs == __KERNEL_CS)
10816+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10817 print_symbol("{%s}", m->ip);
10818 pr_cont("\n");
10819 }
10820@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10821
10822 #define PANIC_TIMEOUT 5 /* 5 seconds */
10823
10824-static atomic_t mce_paniced;
10825+static atomic_unchecked_t mce_paniced;
10826
10827 static int fake_panic;
10828-static atomic_t mce_fake_paniced;
10829+static atomic_unchecked_t mce_fake_paniced;
10830
10831 /* Panic in progress. Enable interrupts and wait for final IPI */
10832 static void wait_for_panic(void)
10833@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10834 /*
10835 * Make sure only one CPU runs in machine check panic
10836 */
10837- if (atomic_inc_return(&mce_paniced) > 1)
10838+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10839 wait_for_panic();
10840 barrier();
10841
10842@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10843 console_verbose();
10844 } else {
10845 /* Don't log too much for fake panic */
10846- if (atomic_inc_return(&mce_fake_paniced) > 1)
10847+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10848 return;
10849 }
10850 /* First print corrected ones that are still unlogged */
10851@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10852 * might have been modified by someone else.
10853 */
10854 rmb();
10855- if (atomic_read(&mce_paniced))
10856+ if (atomic_read_unchecked(&mce_paniced))
10857 wait_for_panic();
10858 if (!monarch_timeout)
10859 goto out;
10860@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10861 */
10862
10863 static DEFINE_SPINLOCK(mce_state_lock);
10864-static int open_count; /* #times opened */
10865+static local_t open_count; /* #times opened */
10866 static int open_exclu; /* already open exclusive? */
10867
10868 static int mce_open(struct inode *inode, struct file *file)
10869 {
10870 spin_lock(&mce_state_lock);
10871
10872- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10873+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10874 spin_unlock(&mce_state_lock);
10875
10876 return -EBUSY;
10877@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10878
10879 if (file->f_flags & O_EXCL)
10880 open_exclu = 1;
10881- open_count++;
10882+ local_inc(&open_count);
10883
10884 spin_unlock(&mce_state_lock);
10885
10886@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10887 {
10888 spin_lock(&mce_state_lock);
10889
10890- open_count--;
10891+ local_dec(&open_count);
10892 open_exclu = 0;
10893
10894 spin_unlock(&mce_state_lock);
10895@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10896 static void mce_reset(void)
10897 {
10898 cpu_missing = 0;
10899- atomic_set(&mce_fake_paniced, 0);
10900+ atomic_set_unchecked(&mce_fake_paniced, 0);
10901 atomic_set(&mce_executing, 0);
10902 atomic_set(&mce_callin, 0);
10903 atomic_set(&global_nwo, 0);
10904diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10905--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10906+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10907@@ -215,7 +215,9 @@ static int inject_init(void)
10908 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10909 return -ENOMEM;
10910 printk(KERN_INFO "Machine check injector initialized\n");
10911- mce_chrdev_ops.write = mce_write;
10912+ pax_open_kernel();
10913+ *(void **)&mce_chrdev_ops.write = mce_write;
10914+ pax_close_kernel();
10915 register_die_notifier(&mce_raise_nb);
10916 return 0;
10917 }
10918diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10919--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
10920+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10921@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10922 u64 size_or_mask, size_and_mask;
10923 static bool mtrr_aps_delayed_init;
10924
10925-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10926+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10927
10928 const struct mtrr_ops *mtrr_if;
10929
10930diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10931--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10932+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10933@@ -25,7 +25,7 @@ struct mtrr_ops {
10934 int (*validate_add_page)(unsigned long base, unsigned long size,
10935 unsigned int type);
10936 int (*have_wrcomb)(void);
10937-};
10938+} __do_const;
10939
10940 extern int generic_get_free_region(unsigned long base, unsigned long size,
10941 int replace_reg);
10942diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10943--- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10944+++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10945@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10946 int i, j, w, wmax, num = 0;
10947 struct hw_perf_event *hwc;
10948
10949+ pax_track_stack();
10950+
10951 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10952
10953 for (i = 0; i < n; i++) {
10954@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10955 break;
10956
10957 perf_callchain_store(entry, frame.return_address);
10958- fp = frame.next_frame;
10959+ fp = (__force const void __user *)frame.next_frame;
10960 }
10961 }
10962
10963diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10964--- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10965+++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10966@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10967 regs = args->regs;
10968
10969 #ifdef CONFIG_X86_32
10970- if (!user_mode_vm(regs)) {
10971+ if (!user_mode(regs)) {
10972 crash_fixup_ss_esp(&fixed_regs, regs);
10973 regs = &fixed_regs;
10974 }
10975diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10976--- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10977+++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10978@@ -11,7 +11,7 @@
10979
10980 #define DOUBLEFAULT_STACKSIZE (1024)
10981 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10982-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10983+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10984
10985 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10986
10987@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10988 unsigned long gdt, tss;
10989
10990 store_gdt(&gdt_desc);
10991- gdt = gdt_desc.address;
10992+ gdt = (unsigned long)gdt_desc.address;
10993
10994 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10995
10996@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10997 /* 0x2 bit is always set */
10998 .flags = X86_EFLAGS_SF | 0x2,
10999 .sp = STACK_START,
11000- .es = __USER_DS,
11001+ .es = __KERNEL_DS,
11002 .cs = __KERNEL_CS,
11003 .ss = __KERNEL_DS,
11004- .ds = __USER_DS,
11005+ .ds = __KERNEL_DS,
11006 .fs = __KERNEL_PERCPU,
11007
11008 .__cr3 = __pa_nodebug(swapper_pg_dir),
11009diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
11010--- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11011+++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11012@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11013 bp = stack_frame(task, regs);
11014
11015 for (;;) {
11016- struct thread_info *context;
11017+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11018
11019- context = (struct thread_info *)
11020- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11021- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11022+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11023
11024- stack = (unsigned long *)context->previous_esp;
11025- if (!stack)
11026+ if (stack_start == task_stack_page(task))
11027 break;
11028+ stack = *(unsigned long **)stack_start;
11029 if (ops->stack(data, "IRQ") < 0)
11030 break;
11031 touch_nmi_watchdog();
11032@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11033 * When in-kernel, we also print out the stack and code at the
11034 * time of the fault..
11035 */
11036- if (!user_mode_vm(regs)) {
11037+ if (!user_mode(regs)) {
11038 unsigned int code_prologue = code_bytes * 43 / 64;
11039 unsigned int code_len = code_bytes;
11040 unsigned char c;
11041 u8 *ip;
11042+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11043
11044 printk(KERN_EMERG "Stack:\n");
11045 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11046
11047 printk(KERN_EMERG "Code: ");
11048
11049- ip = (u8 *)regs->ip - code_prologue;
11050+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11051 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11052 /* try starting at IP */
11053- ip = (u8 *)regs->ip;
11054+ ip = (u8 *)regs->ip + cs_base;
11055 code_len = code_len - code_prologue + 1;
11056 }
11057 for (i = 0; i < code_len; i++, ip++) {
11058@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11059 printk(" Bad EIP value.");
11060 break;
11061 }
11062- if (ip == (u8 *)regs->ip)
11063+ if (ip == (u8 *)regs->ip + cs_base)
11064 printk("<%02x> ", c);
11065 else
11066 printk("%02x ", c);
11067@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11068 {
11069 unsigned short ud2;
11070
11071+ ip = ktla_ktva(ip);
11072 if (ip < PAGE_OFFSET)
11073 return 0;
11074 if (probe_kernel_address((unsigned short *)ip, ud2))
11075diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11076--- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11077+++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11078@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11079 unsigned long *irq_stack_end =
11080 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11081 unsigned used = 0;
11082- struct thread_info *tinfo;
11083 int graph = 0;
11084 unsigned long dummy;
11085+ void *stack_start;
11086
11087 if (!task)
11088 task = current;
11089@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11090 * current stack address. If the stacks consist of nested
11091 * exceptions
11092 */
11093- tinfo = task_thread_info(task);
11094 for (;;) {
11095 char *id;
11096 unsigned long *estack_end;
11097+
11098 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11099 &used, &id);
11100
11101@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11102 if (ops->stack(data, id) < 0)
11103 break;
11104
11105- bp = ops->walk_stack(tinfo, stack, bp, ops,
11106+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11107 data, estack_end, &graph);
11108 ops->stack(data, "<EOE>");
11109 /*
11110@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11111 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11112 if (ops->stack(data, "IRQ") < 0)
11113 break;
11114- bp = ops->walk_stack(tinfo, stack, bp,
11115+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11116 ops, data, irq_stack_end, &graph);
11117 /*
11118 * We link to the next stack (which would be
11119@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11120 /*
11121 * This handles the process stack:
11122 */
11123- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11124+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11125+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11126 put_cpu();
11127 }
11128 EXPORT_SYMBOL(dump_trace);
11129diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11130--- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11131+++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11132@@ -2,6 +2,9 @@
11133 * Copyright (C) 1991, 1992 Linus Torvalds
11134 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11135 */
11136+#ifdef CONFIG_GRKERNSEC_HIDESYM
11137+#define __INCLUDED_BY_HIDESYM 1
11138+#endif
11139 #include <linux/kallsyms.h>
11140 #include <linux/kprobes.h>
11141 #include <linux/uaccess.h>
11142@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11143 static void
11144 print_ftrace_graph_addr(unsigned long addr, void *data,
11145 const struct stacktrace_ops *ops,
11146- struct thread_info *tinfo, int *graph)
11147+ struct task_struct *task, int *graph)
11148 {
11149- struct task_struct *task = tinfo->task;
11150 unsigned long ret_addr;
11151 int index = task->curr_ret_stack;
11152
11153@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11154 static inline void
11155 print_ftrace_graph_addr(unsigned long addr, void *data,
11156 const struct stacktrace_ops *ops,
11157- struct thread_info *tinfo, int *graph)
11158+ struct task_struct *task, int *graph)
11159 { }
11160 #endif
11161
11162@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11163 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11164 */
11165
11166-static inline int valid_stack_ptr(struct thread_info *tinfo,
11167- void *p, unsigned int size, void *end)
11168+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11169 {
11170- void *t = tinfo;
11171 if (end) {
11172 if (p < end && p >= (end-THREAD_SIZE))
11173 return 1;
11174@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11175 }
11176
11177 unsigned long
11178-print_context_stack(struct thread_info *tinfo,
11179+print_context_stack(struct task_struct *task, void *stack_start,
11180 unsigned long *stack, unsigned long bp,
11181 const struct stacktrace_ops *ops, void *data,
11182 unsigned long *end, int *graph)
11183 {
11184 struct stack_frame *frame = (struct stack_frame *)bp;
11185
11186- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11187+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11188 unsigned long addr;
11189
11190 addr = *stack;
11191@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11192 } else {
11193 ops->address(data, addr, 0);
11194 }
11195- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11196+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11197 }
11198 stack++;
11199 }
11200@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11201 EXPORT_SYMBOL_GPL(print_context_stack);
11202
11203 unsigned long
11204-print_context_stack_bp(struct thread_info *tinfo,
11205+print_context_stack_bp(struct task_struct *task, void *stack_start,
11206 unsigned long *stack, unsigned long bp,
11207 const struct stacktrace_ops *ops, void *data,
11208 unsigned long *end, int *graph)
11209@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11210 struct stack_frame *frame = (struct stack_frame *)bp;
11211 unsigned long *ret_addr = &frame->return_address;
11212
11213- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11214+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11215 unsigned long addr = *ret_addr;
11216
11217 if (!__kernel_text_address(addr))
11218@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11219 ops->address(data, addr, 1);
11220 frame = frame->next_frame;
11221 ret_addr = &frame->return_address;
11222- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11223+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11224 }
11225
11226 return (unsigned long)frame;
11227@@ -186,7 +186,7 @@ void dump_stack(void)
11228
11229 bp = stack_frame(current, NULL);
11230 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11231- current->pid, current->comm, print_tainted(),
11232+ task_pid_nr(current), current->comm, print_tainted(),
11233 init_utsname()->release,
11234 (int)strcspn(init_utsname()->version, " "),
11235 init_utsname()->version);
11236@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11237 }
11238 EXPORT_SYMBOL_GPL(oops_begin);
11239
11240+extern void gr_handle_kernel_exploit(void);
11241+
11242 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11243 {
11244 if (regs && kexec_should_crash(current))
11245@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11246 panic("Fatal exception in interrupt");
11247 if (panic_on_oops)
11248 panic("Fatal exception");
11249- do_exit(signr);
11250+
11251+ gr_handle_kernel_exploit();
11252+
11253+ do_group_exit(signr);
11254 }
11255
11256 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11257@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11258
11259 show_registers(regs);
11260 #ifdef CONFIG_X86_32
11261- if (user_mode_vm(regs)) {
11262+ if (user_mode(regs)) {
11263 sp = regs->sp;
11264 ss = regs->ss & 0xffff;
11265 } else {
11266@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11267 unsigned long flags = oops_begin();
11268 int sig = SIGSEGV;
11269
11270- if (!user_mode_vm(regs))
11271+ if (!user_mode(regs))
11272 report_bug(regs->ip, regs);
11273
11274 if (__die(str, regs, err))
11275diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11276--- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11277+++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11278@@ -7,6 +7,7 @@
11279 #include <linux/pci_regs.h>
11280 #include <linux/pci_ids.h>
11281 #include <linux/errno.h>
11282+#include <linux/sched.h>
11283 #include <asm/io.h>
11284 #include <asm/processor.h>
11285 #include <asm/fcntl.h>
11286@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11287 int n;
11288 va_list ap;
11289
11290+ pax_track_stack();
11291+
11292 va_start(ap, fmt);
11293 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11294 early_console->write(early_console, buf, n);
11295diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11296--- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11297+++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11298@@ -185,13 +185,146 @@
11299 /*CFI_REL_OFFSET gs, PT_GS*/
11300 .endm
11301 .macro SET_KERNEL_GS reg
11302+
11303+#ifdef CONFIG_CC_STACKPROTECTOR
11304 movl $(__KERNEL_STACK_CANARY), \reg
11305+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11306+ movl $(__USER_DS), \reg
11307+#else
11308+ xorl \reg, \reg
11309+#endif
11310+
11311 movl \reg, %gs
11312 .endm
11313
11314 #endif /* CONFIG_X86_32_LAZY_GS */
11315
11316-.macro SAVE_ALL
11317+.macro pax_enter_kernel
11318+#ifdef CONFIG_PAX_KERNEXEC
11319+ call pax_enter_kernel
11320+#endif
11321+.endm
11322+
11323+.macro pax_exit_kernel
11324+#ifdef CONFIG_PAX_KERNEXEC
11325+ call pax_exit_kernel
11326+#endif
11327+.endm
11328+
11329+#ifdef CONFIG_PAX_KERNEXEC
11330+ENTRY(pax_enter_kernel)
11331+#ifdef CONFIG_PARAVIRT
11332+ pushl %eax
11333+ pushl %ecx
11334+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11335+ mov %eax, %esi
11336+#else
11337+ mov %cr0, %esi
11338+#endif
11339+ bts $16, %esi
11340+ jnc 1f
11341+ mov %cs, %esi
11342+ cmp $__KERNEL_CS, %esi
11343+ jz 3f
11344+ ljmp $__KERNEL_CS, $3f
11345+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11346+2:
11347+#ifdef CONFIG_PARAVIRT
11348+ mov %esi, %eax
11349+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11350+#else
11351+ mov %esi, %cr0
11352+#endif
11353+3:
11354+#ifdef CONFIG_PARAVIRT
11355+ popl %ecx
11356+ popl %eax
11357+#endif
11358+ ret
11359+ENDPROC(pax_enter_kernel)
11360+
11361+ENTRY(pax_exit_kernel)
11362+#ifdef CONFIG_PARAVIRT
11363+ pushl %eax
11364+ pushl %ecx
11365+#endif
11366+ mov %cs, %esi
11367+ cmp $__KERNEXEC_KERNEL_CS, %esi
11368+ jnz 2f
11369+#ifdef CONFIG_PARAVIRT
11370+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11371+ mov %eax, %esi
11372+#else
11373+ mov %cr0, %esi
11374+#endif
11375+ btr $16, %esi
11376+ ljmp $__KERNEL_CS, $1f
11377+1:
11378+#ifdef CONFIG_PARAVIRT
11379+ mov %esi, %eax
11380+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11381+#else
11382+ mov %esi, %cr0
11383+#endif
11384+2:
11385+#ifdef CONFIG_PARAVIRT
11386+ popl %ecx
11387+ popl %eax
11388+#endif
11389+ ret
11390+ENDPROC(pax_exit_kernel)
11391+#endif
11392+
11393+.macro pax_erase_kstack
11394+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11395+ call pax_erase_kstack
11396+#endif
11397+.endm
11398+
11399+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11400+/*
11401+ * ebp: thread_info
11402+ * ecx, edx: can be clobbered
11403+ */
11404+ENTRY(pax_erase_kstack)
11405+ pushl %edi
11406+ pushl %eax
11407+
11408+ mov TI_lowest_stack(%ebp), %edi
11409+ mov $-0xBEEF, %eax
11410+ std
11411+
11412+1: mov %edi, %ecx
11413+ and $THREAD_SIZE_asm - 1, %ecx
11414+ shr $2, %ecx
11415+ repne scasl
11416+ jecxz 2f
11417+
11418+ cmp $2*16, %ecx
11419+ jc 2f
11420+
11421+ mov $2*16, %ecx
11422+ repe scasl
11423+ jecxz 2f
11424+ jne 1b
11425+
11426+2: cld
11427+ mov %esp, %ecx
11428+ sub %edi, %ecx
11429+ shr $2, %ecx
11430+ rep stosl
11431+
11432+ mov TI_task_thread_sp0(%ebp), %edi
11433+ sub $128, %edi
11434+ mov %edi, TI_lowest_stack(%ebp)
11435+
11436+ popl %eax
11437+ popl %edi
11438+ ret
11439+ENDPROC(pax_erase_kstack)
11440+#endif
11441+
11442+.macro __SAVE_ALL _DS
11443 cld
11444 PUSH_GS
11445 pushl_cfi %fs
11446@@ -214,7 +347,7 @@
11447 CFI_REL_OFFSET ecx, 0
11448 pushl_cfi %ebx
11449 CFI_REL_OFFSET ebx, 0
11450- movl $(__USER_DS), %edx
11451+ movl $\_DS, %edx
11452 movl %edx, %ds
11453 movl %edx, %es
11454 movl $(__KERNEL_PERCPU), %edx
11455@@ -222,6 +355,15 @@
11456 SET_KERNEL_GS %edx
11457 .endm
11458
11459+.macro SAVE_ALL
11460+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11461+ __SAVE_ALL __KERNEL_DS
11462+ pax_enter_kernel
11463+#else
11464+ __SAVE_ALL __USER_DS
11465+#endif
11466+.endm
11467+
11468 .macro RESTORE_INT_REGS
11469 popl_cfi %ebx
11470 CFI_RESTORE ebx
11471@@ -332,7 +474,15 @@ check_userspace:
11472 movb PT_CS(%esp), %al
11473 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11474 cmpl $USER_RPL, %eax
11475+
11476+#ifdef CONFIG_PAX_KERNEXEC
11477+ jae resume_userspace
11478+
11479+ PAX_EXIT_KERNEL
11480+ jmp resume_kernel
11481+#else
11482 jb resume_kernel # not returning to v8086 or userspace
11483+#endif
11484
11485 ENTRY(resume_userspace)
11486 LOCKDEP_SYS_EXIT
11487@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11488 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11489 # int/exception return?
11490 jne work_pending
11491- jmp restore_all
11492+ jmp restore_all_pax
11493 END(ret_from_exception)
11494
11495 #ifdef CONFIG_PREEMPT
11496@@ -394,23 +544,34 @@ sysenter_past_esp:
11497 /*CFI_REL_OFFSET cs, 0*/
11498 /*
11499 * Push current_thread_info()->sysenter_return to the stack.
11500- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11501- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11502 */
11503- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11504+ pushl_cfi $0
11505 CFI_REL_OFFSET eip, 0
11506
11507 pushl_cfi %eax
11508 SAVE_ALL
11509+ GET_THREAD_INFO(%ebp)
11510+ movl TI_sysenter_return(%ebp),%ebp
11511+ movl %ebp,PT_EIP(%esp)
11512 ENABLE_INTERRUPTS(CLBR_NONE)
11513
11514 /*
11515 * Load the potential sixth argument from user stack.
11516 * Careful about security.
11517 */
11518+ movl PT_OLDESP(%esp),%ebp
11519+
11520+#ifdef CONFIG_PAX_MEMORY_UDEREF
11521+ mov PT_OLDSS(%esp),%ds
11522+1: movl %ds:(%ebp),%ebp
11523+ push %ss
11524+ pop %ds
11525+#else
11526 cmpl $__PAGE_OFFSET-3,%ebp
11527 jae syscall_fault
11528 1: movl (%ebp),%ebp
11529+#endif
11530+
11531 movl %ebp,PT_EBP(%esp)
11532 .section __ex_table,"a"
11533 .align 4
11534@@ -433,12 +594,24 @@ sysenter_do_call:
11535 testl $_TIF_ALLWORK_MASK, %ecx
11536 jne sysexit_audit
11537 sysenter_exit:
11538+
11539+#ifdef CONFIG_PAX_RANDKSTACK
11540+ pushl_cfi %eax
11541+ movl %esp, %eax
11542+ call pax_randomize_kstack
11543+ popl_cfi %eax
11544+#endif
11545+
11546+ pax_erase_kstack
11547+
11548 /* if something modifies registers it must also disable sysexit */
11549 movl PT_EIP(%esp), %edx
11550 movl PT_OLDESP(%esp), %ecx
11551 xorl %ebp,%ebp
11552 TRACE_IRQS_ON
11553 1: mov PT_FS(%esp), %fs
11554+2: mov PT_DS(%esp), %ds
11555+3: mov PT_ES(%esp), %es
11556 PTGS_TO_GS
11557 ENABLE_INTERRUPTS_SYSEXIT
11558
11559@@ -455,6 +628,9 @@ sysenter_audit:
11560 movl %eax,%edx /* 2nd arg: syscall number */
11561 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11562 call audit_syscall_entry
11563+
11564+ pax_erase_kstack
11565+
11566 pushl_cfi %ebx
11567 movl PT_EAX(%esp),%eax /* reload syscall number */
11568 jmp sysenter_do_call
11569@@ -481,11 +657,17 @@ sysexit_audit:
11570
11571 CFI_ENDPROC
11572 .pushsection .fixup,"ax"
11573-2: movl $0,PT_FS(%esp)
11574+4: movl $0,PT_FS(%esp)
11575+ jmp 1b
11576+5: movl $0,PT_DS(%esp)
11577+ jmp 1b
11578+6: movl $0,PT_ES(%esp)
11579 jmp 1b
11580 .section __ex_table,"a"
11581 .align 4
11582- .long 1b,2b
11583+ .long 1b,4b
11584+ .long 2b,5b
11585+ .long 3b,6b
11586 .popsection
11587 PTGS_TO_GS_EX
11588 ENDPROC(ia32_sysenter_target)
11589@@ -518,6 +700,15 @@ syscall_exit:
11590 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11591 jne syscall_exit_work
11592
11593+restore_all_pax:
11594+
11595+#ifdef CONFIG_PAX_RANDKSTACK
11596+ movl %esp, %eax
11597+ call pax_randomize_kstack
11598+#endif
11599+
11600+ pax_erase_kstack
11601+
11602 restore_all:
11603 TRACE_IRQS_IRET
11604 restore_all_notrace:
11605@@ -577,14 +768,34 @@ ldt_ss:
11606 * compensating for the offset by changing to the ESPFIX segment with
11607 * a base address that matches for the difference.
11608 */
11609-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11610+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11611 mov %esp, %edx /* load kernel esp */
11612 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11613 mov %dx, %ax /* eax: new kernel esp */
11614 sub %eax, %edx /* offset (low word is 0) */
11615+#ifdef CONFIG_SMP
11616+ movl PER_CPU_VAR(cpu_number), %ebx
11617+ shll $PAGE_SHIFT_asm, %ebx
11618+ addl $cpu_gdt_table, %ebx
11619+#else
11620+ movl $cpu_gdt_table, %ebx
11621+#endif
11622 shr $16, %edx
11623- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11624- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11625+
11626+#ifdef CONFIG_PAX_KERNEXEC
11627+ mov %cr0, %esi
11628+ btr $16, %esi
11629+ mov %esi, %cr0
11630+#endif
11631+
11632+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11633+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11634+
11635+#ifdef CONFIG_PAX_KERNEXEC
11636+ bts $16, %esi
11637+ mov %esi, %cr0
11638+#endif
11639+
11640 pushl_cfi $__ESPFIX_SS
11641 pushl_cfi %eax /* new kernel esp */
11642 /* Disable interrupts, but do not irqtrace this section: we
11643@@ -613,29 +824,23 @@ work_resched:
11644 movl TI_flags(%ebp), %ecx
11645 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11646 # than syscall tracing?
11647- jz restore_all
11648+ jz restore_all_pax
11649 testb $_TIF_NEED_RESCHED, %cl
11650 jnz work_resched
11651
11652 work_notifysig: # deal with pending signals and
11653 # notify-resume requests
11654+ movl %esp, %eax
11655 #ifdef CONFIG_VM86
11656 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11657- movl %esp, %eax
11658- jne work_notifysig_v86 # returning to kernel-space or
11659+ jz 1f # returning to kernel-space or
11660 # vm86-space
11661- xorl %edx, %edx
11662- call do_notify_resume
11663- jmp resume_userspace_sig
11664
11665- ALIGN
11666-work_notifysig_v86:
11667 pushl_cfi %ecx # save ti_flags for do_notify_resume
11668 call save_v86_state # %eax contains pt_regs pointer
11669 popl_cfi %ecx
11670 movl %eax, %esp
11671-#else
11672- movl %esp, %eax
11673+1:
11674 #endif
11675 xorl %edx, %edx
11676 call do_notify_resume
11677@@ -648,6 +853,9 @@ syscall_trace_entry:
11678 movl $-ENOSYS,PT_EAX(%esp)
11679 movl %esp, %eax
11680 call syscall_trace_enter
11681+
11682+ pax_erase_kstack
11683+
11684 /* What it returned is what we'll actually use. */
11685 cmpl $(nr_syscalls), %eax
11686 jnae syscall_call
11687@@ -670,6 +878,10 @@ END(syscall_exit_work)
11688
11689 RING0_INT_FRAME # can't unwind into user space anyway
11690 syscall_fault:
11691+#ifdef CONFIG_PAX_MEMORY_UDEREF
11692+ push %ss
11693+ pop %ds
11694+#endif
11695 GET_THREAD_INFO(%ebp)
11696 movl $-EFAULT,PT_EAX(%esp)
11697 jmp resume_userspace
11698@@ -752,6 +964,36 @@ ptregs_clone:
11699 CFI_ENDPROC
11700 ENDPROC(ptregs_clone)
11701
11702+ ALIGN;
11703+ENTRY(kernel_execve)
11704+ CFI_STARTPROC
11705+ pushl_cfi %ebp
11706+ sub $PT_OLDSS+4,%esp
11707+ pushl_cfi %edi
11708+ pushl_cfi %ecx
11709+ pushl_cfi %eax
11710+ lea 3*4(%esp),%edi
11711+ mov $PT_OLDSS/4+1,%ecx
11712+ xorl %eax,%eax
11713+ rep stosl
11714+ popl_cfi %eax
11715+ popl_cfi %ecx
11716+ popl_cfi %edi
11717+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11718+ pushl_cfi %esp
11719+ call sys_execve
11720+ add $4,%esp
11721+ CFI_ADJUST_CFA_OFFSET -4
11722+ GET_THREAD_INFO(%ebp)
11723+ test %eax,%eax
11724+ jz syscall_exit
11725+ add $PT_OLDSS+4,%esp
11726+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11727+ popl_cfi %ebp
11728+ ret
11729+ CFI_ENDPROC
11730+ENDPROC(kernel_execve)
11731+
11732 .macro FIXUP_ESPFIX_STACK
11733 /*
11734 * Switch back for ESPFIX stack to the normal zerobased stack
11735@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11736 * normal stack and adjusts ESP with the matching offset.
11737 */
11738 /* fixup the stack */
11739- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11740- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11741+#ifdef CONFIG_SMP
11742+ movl PER_CPU_VAR(cpu_number), %ebx
11743+ shll $PAGE_SHIFT_asm, %ebx
11744+ addl $cpu_gdt_table, %ebx
11745+#else
11746+ movl $cpu_gdt_table, %ebx
11747+#endif
11748+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11749+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11750 shl $16, %eax
11751 addl %esp, %eax /* the adjusted stack pointer */
11752 pushl_cfi $__KERNEL_DS
11753@@ -1213,7 +1462,6 @@ return_to_handler:
11754 jmp *%ecx
11755 #endif
11756
11757-.section .rodata,"a"
11758 #include "syscall_table_32.S"
11759
11760 syscall_table_size=(.-sys_call_table)
11761@@ -1259,9 +1507,12 @@ error_code:
11762 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11763 REG_TO_PTGS %ecx
11764 SET_KERNEL_GS %ecx
11765- movl $(__USER_DS), %ecx
11766+ movl $(__KERNEL_DS), %ecx
11767 movl %ecx, %ds
11768 movl %ecx, %es
11769+
11770+ pax_enter_kernel
11771+
11772 TRACE_IRQS_OFF
11773 movl %esp,%eax # pt_regs pointer
11774 call *%edi
11775@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11776 xorl %edx,%edx # zero error code
11777 movl %esp,%eax # pt_regs pointer
11778 call do_nmi
11779+
11780+ pax_exit_kernel
11781+
11782 jmp restore_all_notrace
11783 CFI_ENDPROC
11784
11785@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11786 FIXUP_ESPFIX_STACK # %eax == %esp
11787 xorl %edx,%edx # zero error code
11788 call do_nmi
11789+
11790+ pax_exit_kernel
11791+
11792 RESTORE_REGS
11793 lss 12+4(%esp), %esp # back to espfix stack
11794 CFI_ADJUST_CFA_OFFSET -24
11795diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11796--- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11797+++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-09-17 18:31:51.000000000 -0400
11798@@ -53,6 +53,7 @@
11799 #include <asm/paravirt.h>
11800 #include <asm/ftrace.h>
11801 #include <asm/percpu.h>
11802+#include <asm/pgtable.h>
11803
11804 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11805 #include <linux/elf-em.h>
11806@@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11807 ENDPROC(native_usergs_sysret64)
11808 #endif /* CONFIG_PARAVIRT */
11809
11810+ .macro ljmpq sel, off
11811+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11812+ .byte 0x48; ljmp *1234f(%rip)
11813+ .pushsection .rodata
11814+ .align 16
11815+ 1234: .quad \off; .word \sel
11816+ .popsection
11817+#else
11818+ pushq $\sel
11819+ pushq $\off
11820+ lretq
11821+#endif
11822+ .endm
11823+
11824+ .macro pax_enter_kernel
11825+#ifdef CONFIG_PAX_KERNEXEC
11826+ call pax_enter_kernel
11827+#endif
11828+ .endm
11829+
11830+ .macro pax_exit_kernel
11831+#ifdef CONFIG_PAX_KERNEXEC
11832+ call pax_exit_kernel
11833+#endif
11834+ .endm
11835+
11836+#ifdef CONFIG_PAX_KERNEXEC
11837+ENTRY(pax_enter_kernel)
11838+ pushq %rdi
11839+
11840+#ifdef CONFIG_PARAVIRT
11841+ PV_SAVE_REGS(CLBR_RDI)
11842+#endif
11843+
11844+ GET_CR0_INTO_RDI
11845+ bts $16,%rdi
11846+ jnc 1f
11847+ mov %cs,%edi
11848+ cmp $__KERNEL_CS,%edi
11849+ jz 3f
11850+ ljmpq __KERNEL_CS,3f
11851+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11852+2: SET_RDI_INTO_CR0
11853+3:
11854+
11855+#ifdef CONFIG_PARAVIRT
11856+ PV_RESTORE_REGS(CLBR_RDI)
11857+#endif
11858+
11859+ popq %rdi
11860+ retq
11861+ENDPROC(pax_enter_kernel)
11862+
11863+ENTRY(pax_exit_kernel)
11864+ pushq %rdi
11865+
11866+#ifdef CONFIG_PARAVIRT
11867+ PV_SAVE_REGS(CLBR_RDI)
11868+#endif
11869+
11870+ mov %cs,%rdi
11871+ cmp $__KERNEXEC_KERNEL_CS,%edi
11872+ jnz 2f
11873+ GET_CR0_INTO_RDI
11874+ btr $16,%rdi
11875+ ljmpq __KERNEL_CS,1f
11876+1: SET_RDI_INTO_CR0
11877+2:
11878+
11879+#ifdef CONFIG_PARAVIRT
11880+ PV_RESTORE_REGS(CLBR_RDI);
11881+#endif
11882+
11883+ popq %rdi
11884+ retq
11885+ENDPROC(pax_exit_kernel)
11886+#endif
11887+
11888+ .macro pax_enter_kernel_user
11889+#ifdef CONFIG_PAX_MEMORY_UDEREF
11890+ call pax_enter_kernel_user
11891+#endif
11892+ .endm
11893+
11894+ .macro pax_exit_kernel_user
11895+#ifdef CONFIG_PAX_MEMORY_UDEREF
11896+ call pax_exit_kernel_user
11897+#endif
11898+#ifdef CONFIG_PAX_RANDKSTACK
11899+ push %rax
11900+ call pax_randomize_kstack
11901+ pop %rax
11902+#endif
11903+ .endm
11904+
11905+#ifdef CONFIG_PAX_MEMORY_UDEREF
11906+ENTRY(pax_enter_kernel_user)
11907+ pushq %rdi
11908+ pushq %rbx
11909+
11910+#ifdef CONFIG_PARAVIRT
11911+ PV_SAVE_REGS(CLBR_RDI)
11912+#endif
11913+
11914+ GET_CR3_INTO_RDI
11915+ mov %rdi,%rbx
11916+ add $__START_KERNEL_map,%rbx
11917+ sub phys_base(%rip),%rbx
11918+
11919+#ifdef CONFIG_PARAVIRT
11920+ pushq %rdi
11921+ cmpl $0, pv_info+PARAVIRT_enabled
11922+ jz 1f
11923+ i = 0
11924+ .rept USER_PGD_PTRS
11925+ mov i*8(%rbx),%rsi
11926+ mov $0,%sil
11927+ lea i*8(%rbx),%rdi
11928+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11929+ i = i + 1
11930+ .endr
11931+ jmp 2f
11932+1:
11933+#endif
11934+
11935+ i = 0
11936+ .rept USER_PGD_PTRS
11937+ movb $0,i*8(%rbx)
11938+ i = i + 1
11939+ .endr
11940+
11941+#ifdef CONFIG_PARAVIRT
11942+2: popq %rdi
11943+#endif
11944+ SET_RDI_INTO_CR3
11945+
11946+#ifdef CONFIG_PAX_KERNEXEC
11947+ GET_CR0_INTO_RDI
11948+ bts $16,%rdi
11949+ SET_RDI_INTO_CR0
11950+#endif
11951+
11952+#ifdef CONFIG_PARAVIRT
11953+ PV_RESTORE_REGS(CLBR_RDI)
11954+#endif
11955+
11956+ popq %rbx
11957+ popq %rdi
11958+ retq
11959+ENDPROC(pax_enter_kernel_user)
11960+
11961+ENTRY(pax_exit_kernel_user)
11962+ push %rdi
11963+
11964+#ifdef CONFIG_PARAVIRT
11965+ pushq %rbx
11966+ PV_SAVE_REGS(CLBR_RDI)
11967+#endif
11968+
11969+#ifdef CONFIG_PAX_KERNEXEC
11970+ GET_CR0_INTO_RDI
11971+ btr $16,%rdi
11972+ SET_RDI_INTO_CR0
11973+#endif
11974+
11975+ GET_CR3_INTO_RDI
11976+ add $__START_KERNEL_map,%rdi
11977+ sub phys_base(%rip),%rdi
11978+
11979+#ifdef CONFIG_PARAVIRT
11980+ cmpl $0, pv_info+PARAVIRT_enabled
11981+ jz 1f
11982+ mov %rdi,%rbx
11983+ i = 0
11984+ .rept USER_PGD_PTRS
11985+ mov i*8(%rbx),%rsi
11986+ mov $0x67,%sil
11987+ lea i*8(%rbx),%rdi
11988+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11989+ i = i + 1
11990+ .endr
11991+ jmp 2f
11992+1:
11993+#endif
11994+
11995+ i = 0
11996+ .rept USER_PGD_PTRS
11997+ movb $0x67,i*8(%rdi)
11998+ i = i + 1
11999+ .endr
12000+
12001+#ifdef CONFIG_PARAVIRT
12002+2: PV_RESTORE_REGS(CLBR_RDI)
12003+ popq %rbx
12004+#endif
12005+
12006+ popq %rdi
12007+ retq
12008+ENDPROC(pax_exit_kernel_user)
12009+#endif
12010+
12011+ .macro pax_erase_kstack
12012+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12013+ call pax_erase_kstack
12014+#endif
12015+ .endm
12016+
12017+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12018+/*
12019+ * r10: thread_info
12020+ * rcx, rdx: can be clobbered
12021+ */
12022+ENTRY(pax_erase_kstack)
12023+ pushq %rdi
12024+ pushq %rax
12025+ pushq %r10
12026+
12027+ GET_THREAD_INFO(%r10)
12028+ mov TI_lowest_stack(%r10), %rdi
12029+ mov $-0xBEEF, %rax
12030+ std
12031+
12032+1: mov %edi, %ecx
12033+ and $THREAD_SIZE_asm - 1, %ecx
12034+ shr $3, %ecx
12035+ repne scasq
12036+ jecxz 2f
12037+
12038+ cmp $2*8, %ecx
12039+ jc 2f
12040+
12041+ mov $2*8, %ecx
12042+ repe scasq
12043+ jecxz 2f
12044+ jne 1b
12045+
12046+2: cld
12047+ mov %esp, %ecx
12048+ sub %edi, %ecx
12049+
12050+ cmp $THREAD_SIZE_asm, %rcx
12051+ jb 3f
12052+ ud2
12053+3:
12054+
12055+ shr $3, %ecx
12056+ rep stosq
12057+
12058+ mov TI_task_thread_sp0(%r10), %rdi
12059+ sub $256, %rdi
12060+ mov %rdi, TI_lowest_stack(%r10)
12061+
12062+ popq %r10
12063+ popq %rax
12064+ popq %rdi
12065+ ret
12066+ENDPROC(pax_erase_kstack)
12067+#endif
12068
12069 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12070 #ifdef CONFIG_TRACE_IRQFLAGS
12071@@ -318,7 +577,7 @@ ENTRY(save_args)
12072 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12073 movq_cfi rbp, 8 /* push %rbp */
12074 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12075- testl $3, CS(%rdi)
12076+ testb $3, CS(%rdi)
12077 je 1f
12078 SWAPGS
12079 /*
12080@@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12081
12082 RESTORE_REST
12083
12084- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12085+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12086 je int_ret_from_sys_call
12087
12088 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12089@@ -455,7 +714,7 @@ END(ret_from_fork)
12090 ENTRY(system_call)
12091 CFI_STARTPROC simple
12092 CFI_SIGNAL_FRAME
12093- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12094+ CFI_DEF_CFA rsp,0
12095 CFI_REGISTER rip,rcx
12096 /*CFI_REGISTER rflags,r11*/
12097 SWAPGS_UNSAFE_STACK
12098@@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12099
12100 movq %rsp,PER_CPU_VAR(old_rsp)
12101 movq PER_CPU_VAR(kernel_stack),%rsp
12102+ pax_enter_kernel_user
12103 /*
12104 * No need to follow this irqs off/on section - it's straight
12105 * and short:
12106 */
12107 ENABLE_INTERRUPTS(CLBR_NONE)
12108- SAVE_ARGS 8,1
12109+ SAVE_ARGS 8*6,1
12110 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12111 movq %rcx,RIP-ARGOFFSET(%rsp)
12112 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12113@@ -502,6 +762,8 @@ sysret_check:
12114 andl %edi,%edx
12115 jnz sysret_careful
12116 CFI_REMEMBER_STATE
12117+ pax_exit_kernel_user
12118+ pax_erase_kstack
12119 /*
12120 * sysretq will re-enable interrupts:
12121 */
12122@@ -560,6 +822,9 @@ auditsys:
12123 movq %rax,%rsi /* 2nd arg: syscall number */
12124 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12125 call audit_syscall_entry
12126+
12127+ pax_erase_kstack
12128+
12129 LOAD_ARGS 0 /* reload call-clobbered registers */
12130 jmp system_call_fastpath
12131
12132@@ -590,6 +855,9 @@ tracesys:
12133 FIXUP_TOP_OF_STACK %rdi
12134 movq %rsp,%rdi
12135 call syscall_trace_enter
12136+
12137+ pax_erase_kstack
12138+
12139 /*
12140 * Reload arg registers from stack in case ptrace changed them.
12141 * We don't reload %rax because syscall_trace_enter() returned
12142@@ -611,7 +879,7 @@ tracesys:
12143 GLOBAL(int_ret_from_sys_call)
12144 DISABLE_INTERRUPTS(CLBR_NONE)
12145 TRACE_IRQS_OFF
12146- testl $3,CS-ARGOFFSET(%rsp)
12147+ testb $3,CS-ARGOFFSET(%rsp)
12148 je retint_restore_args
12149 movl $_TIF_ALLWORK_MASK,%edi
12150 /* edi: mask to check */
12151@@ -793,6 +1061,16 @@ END(interrupt)
12152 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12153 call save_args
12154 PARTIAL_FRAME 0
12155+#ifdef CONFIG_PAX_MEMORY_UDEREF
12156+ testb $3, CS(%rdi)
12157+ jnz 1f
12158+ pax_enter_kernel
12159+ jmp 2f
12160+1: pax_enter_kernel_user
12161+2:
12162+#else
12163+ pax_enter_kernel
12164+#endif
12165 call \func
12166 .endm
12167
12168@@ -825,7 +1103,7 @@ ret_from_intr:
12169 CFI_ADJUST_CFA_OFFSET -8
12170 exit_intr:
12171 GET_THREAD_INFO(%rcx)
12172- testl $3,CS-ARGOFFSET(%rsp)
12173+ testb $3,CS-ARGOFFSET(%rsp)
12174 je retint_kernel
12175
12176 /* Interrupt came from user space */
12177@@ -847,12 +1125,18 @@ retint_swapgs: /* return to user-space
12178 * The iretq could re-enable interrupts:
12179 */
12180 DISABLE_INTERRUPTS(CLBR_ANY)
12181+ pax_exit_kernel_user
12182+ pax_erase_kstack
12183 TRACE_IRQS_IRETQ
12184 SWAPGS
12185 jmp restore_args
12186
12187 retint_restore_args: /* return to kernel space */
12188 DISABLE_INTERRUPTS(CLBR_ANY)
12189+ pax_exit_kernel
12190+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12191+ orb $0x80,0x7+RIP-ARGOFFSET(%rsp)
12192+#endif
12193 /*
12194 * The iretq could re-enable interrupts:
12195 */
12196@@ -1027,6 +1311,16 @@ ENTRY(\sym)
12197 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12198 call error_entry
12199 DEFAULT_FRAME 0
12200+#ifdef CONFIG_PAX_MEMORY_UDEREF
12201+ testb $3, CS(%rsp)
12202+ jnz 1f
12203+ pax_enter_kernel
12204+ jmp 2f
12205+1: pax_enter_kernel_user
12206+2:
12207+#else
12208+ pax_enter_kernel
12209+#endif
12210 movq %rsp,%rdi /* pt_regs pointer */
12211 xorl %esi,%esi /* no error code */
12212 call \do_sym
12213@@ -1044,6 +1338,16 @@ ENTRY(\sym)
12214 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12215 call save_paranoid
12216 TRACE_IRQS_OFF
12217+#ifdef CONFIG_PAX_MEMORY_UDEREF
12218+ testb $3, CS(%rsp)
12219+ jnz 1f
12220+ pax_enter_kernel
12221+ jmp 2f
12222+1: pax_enter_kernel_user
12223+2:
12224+#else
12225+ pax_enter_kernel
12226+#endif
12227 movq %rsp,%rdi /* pt_regs pointer */
12228 xorl %esi,%esi /* no error code */
12229 call \do_sym
12230@@ -1052,7 +1356,7 @@ ENTRY(\sym)
12231 END(\sym)
12232 .endm
12233
12234-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12235+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12236 .macro paranoidzeroentry_ist sym do_sym ist
12237 ENTRY(\sym)
12238 INTR_FRAME
12239@@ -1062,8 +1366,24 @@ ENTRY(\sym)
12240 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12241 call save_paranoid
12242 TRACE_IRQS_OFF
12243+#ifdef CONFIG_PAX_MEMORY_UDEREF
12244+ testb $3, CS(%rsp)
12245+ jnz 1f
12246+ pax_enter_kernel
12247+ jmp 2f
12248+1: pax_enter_kernel_user
12249+2:
12250+#else
12251+ pax_enter_kernel
12252+#endif
12253 movq %rsp,%rdi /* pt_regs pointer */
12254 xorl %esi,%esi /* no error code */
12255+#ifdef CONFIG_SMP
12256+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12257+ lea init_tss(%r12), %r12
12258+#else
12259+ lea init_tss(%rip), %r12
12260+#endif
12261 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12262 call \do_sym
12263 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12264@@ -1080,6 +1400,16 @@ ENTRY(\sym)
12265 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12266 call error_entry
12267 DEFAULT_FRAME 0
12268+#ifdef CONFIG_PAX_MEMORY_UDEREF
12269+ testb $3, CS(%rsp)
12270+ jnz 1f
12271+ pax_enter_kernel
12272+ jmp 2f
12273+1: pax_enter_kernel_user
12274+2:
12275+#else
12276+ pax_enter_kernel
12277+#endif
12278 movq %rsp,%rdi /* pt_regs pointer */
12279 movq ORIG_RAX(%rsp),%rsi /* get error code */
12280 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12281@@ -1099,6 +1429,16 @@ ENTRY(\sym)
12282 call save_paranoid
12283 DEFAULT_FRAME 0
12284 TRACE_IRQS_OFF
12285+#ifdef CONFIG_PAX_MEMORY_UDEREF
12286+ testb $3, CS(%rsp)
12287+ jnz 1f
12288+ pax_enter_kernel
12289+ jmp 2f
12290+1: pax_enter_kernel_user
12291+2:
12292+#else
12293+ pax_enter_kernel
12294+#endif
12295 movq %rsp,%rdi /* pt_regs pointer */
12296 movq ORIG_RAX(%rsp),%rsi /* get error code */
12297 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12298@@ -1361,16 +1701,35 @@ ENTRY(paranoid_exit)
12299 TRACE_IRQS_OFF
12300 testl %ebx,%ebx /* swapgs needed? */
12301 jnz paranoid_restore
12302- testl $3,CS(%rsp)
12303+ testb $3,CS(%rsp)
12304 jnz paranoid_userspace
12305+#ifdef CONFIG_PAX_MEMORY_UDEREF
12306+ pax_exit_kernel
12307+ TRACE_IRQS_IRETQ 0
12308+ SWAPGS_UNSAFE_STACK
12309+ RESTORE_ALL 8
12310+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12311+ orb $0x80,0x7(%rsp)
12312+#endif
12313+ jmp irq_return
12314+#endif
12315 paranoid_swapgs:
12316+#ifdef CONFIG_PAX_MEMORY_UDEREF
12317+ pax_exit_kernel_user
12318+#else
12319+ pax_exit_kernel
12320+#endif
12321 TRACE_IRQS_IRETQ 0
12322 SWAPGS_UNSAFE_STACK
12323 RESTORE_ALL 8
12324 jmp irq_return
12325 paranoid_restore:
12326+ pax_exit_kernel
12327 TRACE_IRQS_IRETQ 0
12328 RESTORE_ALL 8
12329+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12330+ orb $0x80,0x7(%rsp)
12331+#endif
12332 jmp irq_return
12333 paranoid_userspace:
12334 GET_THREAD_INFO(%rcx)
12335@@ -1426,7 +1785,7 @@ ENTRY(error_entry)
12336 movq_cfi r14, R14+8
12337 movq_cfi r15, R15+8
12338 xorl %ebx,%ebx
12339- testl $3,CS+8(%rsp)
12340+ testb $3,CS+8(%rsp)
12341 je error_kernelspace
12342 error_swapgs:
12343 SWAPGS
12344@@ -1490,6 +1849,16 @@ ENTRY(nmi)
12345 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12346 call save_paranoid
12347 DEFAULT_FRAME 0
12348+#ifdef CONFIG_PAX_MEMORY_UDEREF
12349+ testb $3, CS(%rsp)
12350+ jnz 1f
12351+ pax_enter_kernel
12352+ jmp 2f
12353+1: pax_enter_kernel_user
12354+2:
12355+#else
12356+ pax_enter_kernel
12357+#endif
12358 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12359 movq %rsp,%rdi
12360 movq $-1,%rsi
12361@@ -1500,12 +1869,32 @@ ENTRY(nmi)
12362 DISABLE_INTERRUPTS(CLBR_NONE)
12363 testl %ebx,%ebx /* swapgs needed? */
12364 jnz nmi_restore
12365- testl $3,CS(%rsp)
12366+ testb $3,CS(%rsp)
12367 jnz nmi_userspace
12368+#ifdef CONFIG_PAX_MEMORY_UDEREF
12369+ pax_exit_kernel
12370+ SWAPGS_UNSAFE_STACK
12371+ RESTORE_ALL 8
12372+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12373+ orb $0x80,0x7(%rsp)
12374+#endif
12375+ jmp irq_return
12376+#endif
12377 nmi_swapgs:
12378+#ifdef CONFIG_PAX_MEMORY_UDEREF
12379+ pax_exit_kernel_user
12380+#else
12381+ pax_exit_kernel
12382+#endif
12383 SWAPGS_UNSAFE_STACK
12384+ RESTORE_ALL 8
12385+ jmp irq_return
12386 nmi_restore:
12387+ pax_exit_kernel
12388 RESTORE_ALL 8
12389+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12390+ orb $0x80,0x7(%rsp)
12391+#endif
12392 jmp irq_return
12393 nmi_userspace:
12394 GET_THREAD_INFO(%rcx)
12395diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12396--- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12397+++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12398@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12399 static const void *mod_code_newcode; /* holds the text to write to the IP */
12400
12401 static unsigned nmi_wait_count;
12402-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12403+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12404
12405 int ftrace_arch_read_dyn_info(char *buf, int size)
12406 {
12407@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12408
12409 r = snprintf(buf, size, "%u %u",
12410 nmi_wait_count,
12411- atomic_read(&nmi_update_count));
12412+ atomic_read_unchecked(&nmi_update_count));
12413 return r;
12414 }
12415
12416@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12417
12418 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12419 smp_rmb();
12420+ pax_open_kernel();
12421 ftrace_mod_code();
12422- atomic_inc(&nmi_update_count);
12423+ pax_close_kernel();
12424+ atomic_inc_unchecked(&nmi_update_count);
12425 }
12426 /* Must have previous changes seen before executions */
12427 smp_mb();
12428@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12429 {
12430 unsigned char replaced[MCOUNT_INSN_SIZE];
12431
12432+ ip = ktla_ktva(ip);
12433+
12434 /*
12435 * Note: Due to modules and __init, code can
12436 * disappear and change, we need to protect against faulting
12437@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12438 unsigned char old[MCOUNT_INSN_SIZE], *new;
12439 int ret;
12440
12441- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12442+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12443 new = ftrace_call_replace(ip, (unsigned long)func);
12444 ret = ftrace_modify_code(ip, old, new);
12445
12446@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12447 {
12448 unsigned char code[MCOUNT_INSN_SIZE];
12449
12450+ ip = ktla_ktva(ip);
12451+
12452 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12453 return -EFAULT;
12454
12455diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12456--- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12457+++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12458@@ -19,6 +19,7 @@
12459 #include <asm/io_apic.h>
12460 #include <asm/bios_ebda.h>
12461 #include <asm/tlbflush.h>
12462+#include <asm/boot.h>
12463
12464 static void __init i386_default_early_setup(void)
12465 {
12466@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12467 {
12468 memblock_init();
12469
12470- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12471+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12472
12473 #ifdef CONFIG_BLK_DEV_INITRD
12474 /* Reserve INITRD */
12475diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12476--- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12477+++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12478@@ -25,6 +25,12 @@
12479 /* Physical address */
12480 #define pa(X) ((X) - __PAGE_OFFSET)
12481
12482+#ifdef CONFIG_PAX_KERNEXEC
12483+#define ta(X) (X)
12484+#else
12485+#define ta(X) ((X) - __PAGE_OFFSET)
12486+#endif
12487+
12488 /*
12489 * References to members of the new_cpu_data structure.
12490 */
12491@@ -54,11 +60,7 @@
12492 * and small than max_low_pfn, otherwise will waste some page table entries
12493 */
12494
12495-#if PTRS_PER_PMD > 1
12496-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12497-#else
12498-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12499-#endif
12500+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12501
12502 /* Number of possible pages in the lowmem region */
12503 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12504@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12505 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12506
12507 /*
12508+ * Real beginning of normal "text" segment
12509+ */
12510+ENTRY(stext)
12511+ENTRY(_stext)
12512+
12513+/*
12514 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12515 * %esi points to the real-mode code as a 32-bit pointer.
12516 * CS and DS must be 4 GB flat segments, but we don't depend on
12517@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12518 * can.
12519 */
12520 __HEAD
12521+
12522+#ifdef CONFIG_PAX_KERNEXEC
12523+ jmp startup_32
12524+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12525+.fill PAGE_SIZE-5,1,0xcc
12526+#endif
12527+
12528 ENTRY(startup_32)
12529 movl pa(stack_start),%ecx
12530
12531@@ -105,6 +120,57 @@ ENTRY(startup_32)
12532 2:
12533 leal -__PAGE_OFFSET(%ecx),%esp
12534
12535+#ifdef CONFIG_SMP
12536+ movl $pa(cpu_gdt_table),%edi
12537+ movl $__per_cpu_load,%eax
12538+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12539+ rorl $16,%eax
12540+ movb %al,__KERNEL_PERCPU + 4(%edi)
12541+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12542+ movl $__per_cpu_end - 1,%eax
12543+ subl $__per_cpu_start,%eax
12544+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12545+#endif
12546+
12547+#ifdef CONFIG_PAX_MEMORY_UDEREF
12548+ movl $NR_CPUS,%ecx
12549+ movl $pa(cpu_gdt_table),%edi
12550+1:
12551+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12552+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12553+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12554+ addl $PAGE_SIZE_asm,%edi
12555+ loop 1b
12556+#endif
12557+
12558+#ifdef CONFIG_PAX_KERNEXEC
12559+ movl $pa(boot_gdt),%edi
12560+ movl $__LOAD_PHYSICAL_ADDR,%eax
12561+ movw %ax,__BOOT_CS + 2(%edi)
12562+ rorl $16,%eax
12563+ movb %al,__BOOT_CS + 4(%edi)
12564+ movb %ah,__BOOT_CS + 7(%edi)
12565+ rorl $16,%eax
12566+
12567+ ljmp $(__BOOT_CS),$1f
12568+1:
12569+
12570+ movl $NR_CPUS,%ecx
12571+ movl $pa(cpu_gdt_table),%edi
12572+ addl $__PAGE_OFFSET,%eax
12573+1:
12574+ movw %ax,__KERNEL_CS + 2(%edi)
12575+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12576+ rorl $16,%eax
12577+ movb %al,__KERNEL_CS + 4(%edi)
12578+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12579+ movb %ah,__KERNEL_CS + 7(%edi)
12580+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12581+ rorl $16,%eax
12582+ addl $PAGE_SIZE_asm,%edi
12583+ loop 1b
12584+#endif
12585+
12586 /*
12587 * Clear BSS first so that there are no surprises...
12588 */
12589@@ -195,8 +261,11 @@ ENTRY(startup_32)
12590 movl %eax, pa(max_pfn_mapped)
12591
12592 /* Do early initialization of the fixmap area */
12593- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12594- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12595+#ifdef CONFIG_COMPAT_VDSO
12596+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12597+#else
12598+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12599+#endif
12600 #else /* Not PAE */
12601
12602 page_pde_offset = (__PAGE_OFFSET >> 20);
12603@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12604 movl %eax, pa(max_pfn_mapped)
12605
12606 /* Do early initialization of the fixmap area */
12607- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12608- movl %eax,pa(initial_page_table+0xffc)
12609+#ifdef CONFIG_COMPAT_VDSO
12610+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12611+#else
12612+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12613+#endif
12614 #endif
12615
12616 #ifdef CONFIG_PARAVIRT
12617@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12618 cmpl $num_subarch_entries, %eax
12619 jae bad_subarch
12620
12621- movl pa(subarch_entries)(,%eax,4), %eax
12622- subl $__PAGE_OFFSET, %eax
12623- jmp *%eax
12624+ jmp *pa(subarch_entries)(,%eax,4)
12625
12626 bad_subarch:
12627 WEAK(lguest_entry)
12628@@ -255,10 +325,10 @@ WEAK(xen_entry)
12629 __INITDATA
12630
12631 subarch_entries:
12632- .long default_entry /* normal x86/PC */
12633- .long lguest_entry /* lguest hypervisor */
12634- .long xen_entry /* Xen hypervisor */
12635- .long default_entry /* Moorestown MID */
12636+ .long ta(default_entry) /* normal x86/PC */
12637+ .long ta(lguest_entry) /* lguest hypervisor */
12638+ .long ta(xen_entry) /* Xen hypervisor */
12639+ .long ta(default_entry) /* Moorestown MID */
12640 num_subarch_entries = (. - subarch_entries) / 4
12641 .previous
12642 #else
12643@@ -312,6 +382,7 @@ default_entry:
12644 orl %edx,%eax
12645 movl %eax,%cr4
12646
12647+#ifdef CONFIG_X86_PAE
12648 testb $X86_CR4_PAE, %al # check if PAE is enabled
12649 jz 6f
12650
12651@@ -340,6 +411,9 @@ default_entry:
12652 /* Make changes effective */
12653 wrmsr
12654
12655+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12656+#endif
12657+
12658 6:
12659
12660 /*
12661@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12662 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12663 movl %eax,%ss # after changing gdt.
12664
12665- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12666+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12667 movl %eax,%ds
12668 movl %eax,%es
12669
12670@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12671 */
12672 cmpb $0,ready
12673 jne 1f
12674- movl $gdt_page,%eax
12675+ movl $cpu_gdt_table,%eax
12676 movl $stack_canary,%ecx
12677+#ifdef CONFIG_SMP
12678+ addl $__per_cpu_load,%ecx
12679+#endif
12680 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12681 shrl $16, %ecx
12682 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12683 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12684 1:
12685-#endif
12686 movl $(__KERNEL_STACK_CANARY),%eax
12687+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12688+ movl $(__USER_DS),%eax
12689+#else
12690+ xorl %eax,%eax
12691+#endif
12692 movl %eax,%gs
12693
12694 xorl %eax,%eax # Clear LDT
12695@@ -558,22 +639,22 @@ early_page_fault:
12696 jmp early_fault
12697
12698 early_fault:
12699- cld
12700 #ifdef CONFIG_PRINTK
12701+ cmpl $1,%ss:early_recursion_flag
12702+ je hlt_loop
12703+ incl %ss:early_recursion_flag
12704+ cld
12705 pusha
12706 movl $(__KERNEL_DS),%eax
12707 movl %eax,%ds
12708 movl %eax,%es
12709- cmpl $2,early_recursion_flag
12710- je hlt_loop
12711- incl early_recursion_flag
12712 movl %cr2,%eax
12713 pushl %eax
12714 pushl %edx /* trapno */
12715 pushl $fault_msg
12716 call printk
12717+; call dump_stack
12718 #endif
12719- call dump_stack
12720 hlt_loop:
12721 hlt
12722 jmp hlt_loop
12723@@ -581,8 +662,11 @@ hlt_loop:
12724 /* This is the default interrupt "handler" :-) */
12725 ALIGN
12726 ignore_int:
12727- cld
12728 #ifdef CONFIG_PRINTK
12729+ cmpl $2,%ss:early_recursion_flag
12730+ je hlt_loop
12731+ incl %ss:early_recursion_flag
12732+ cld
12733 pushl %eax
12734 pushl %ecx
12735 pushl %edx
12736@@ -591,9 +675,6 @@ ignore_int:
12737 movl $(__KERNEL_DS),%eax
12738 movl %eax,%ds
12739 movl %eax,%es
12740- cmpl $2,early_recursion_flag
12741- je hlt_loop
12742- incl early_recursion_flag
12743 pushl 16(%esp)
12744 pushl 24(%esp)
12745 pushl 32(%esp)
12746@@ -622,29 +703,43 @@ ENTRY(initial_code)
12747 /*
12748 * BSS section
12749 */
12750-__PAGE_ALIGNED_BSS
12751- .align PAGE_SIZE
12752 #ifdef CONFIG_X86_PAE
12753+.section .initial_pg_pmd,"a",@progbits
12754 initial_pg_pmd:
12755 .fill 1024*KPMDS,4,0
12756 #else
12757+.section .initial_page_table,"a",@progbits
12758 ENTRY(initial_page_table)
12759 .fill 1024,4,0
12760 #endif
12761+.section .initial_pg_fixmap,"a",@progbits
12762 initial_pg_fixmap:
12763 .fill 1024,4,0
12764+.section .empty_zero_page,"a",@progbits
12765 ENTRY(empty_zero_page)
12766 .fill 4096,1,0
12767+.section .swapper_pg_dir,"a",@progbits
12768 ENTRY(swapper_pg_dir)
12769+#ifdef CONFIG_X86_PAE
12770+ .fill 4,8,0
12771+#else
12772 .fill 1024,4,0
12773+#endif
12774+
12775+/*
12776+ * The IDT has to be page-aligned to simplify the Pentium
12777+ * F0 0F bug workaround.. We have a special link segment
12778+ * for this.
12779+ */
12780+.section .idt,"a",@progbits
12781+ENTRY(idt_table)
12782+ .fill 256,8,0
12783
12784 /*
12785 * This starts the data section.
12786 */
12787 #ifdef CONFIG_X86_PAE
12788-__PAGE_ALIGNED_DATA
12789- /* Page-aligned for the benefit of paravirt? */
12790- .align PAGE_SIZE
12791+.section .initial_page_table,"a",@progbits
12792 ENTRY(initial_page_table)
12793 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12794 # if KPMDS == 3
12795@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12796 # error "Kernel PMDs should be 1, 2 or 3"
12797 # endif
12798 .align PAGE_SIZE /* needs to be page-sized too */
12799+
12800+#ifdef CONFIG_PAX_PER_CPU_PGD
12801+ENTRY(cpu_pgd)
12802+ .rept NR_CPUS
12803+ .fill 4,8,0
12804+ .endr
12805+#endif
12806+
12807 #endif
12808
12809 .data
12810 .balign 4
12811 ENTRY(stack_start)
12812- .long init_thread_union+THREAD_SIZE
12813+ .long init_thread_union+THREAD_SIZE-8
12814+
12815+ready: .byte 0
12816
12817+.section .rodata,"a",@progbits
12818 early_recursion_flag:
12819 .long 0
12820
12821-ready: .byte 0
12822-
12823 int_msg:
12824 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12825
12826@@ -707,7 +811,7 @@ fault_msg:
12827 .word 0 # 32 bit align gdt_desc.address
12828 boot_gdt_descr:
12829 .word __BOOT_DS+7
12830- .long boot_gdt - __PAGE_OFFSET
12831+ .long pa(boot_gdt)
12832
12833 .word 0 # 32-bit align idt_desc.address
12834 idt_descr:
12835@@ -718,7 +822,7 @@ idt_descr:
12836 .word 0 # 32 bit align gdt_desc.address
12837 ENTRY(early_gdt_descr)
12838 .word GDT_ENTRIES*8-1
12839- .long gdt_page /* Overwritten for secondary CPUs */
12840+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12841
12842 /*
12843 * The boot_gdt must mirror the equivalent in setup.S and is
12844@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12845 .align L1_CACHE_BYTES
12846 ENTRY(boot_gdt)
12847 .fill GDT_ENTRY_BOOT_CS,8,0
12848- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12849- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12850+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12851+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12852+
12853+ .align PAGE_SIZE_asm
12854+ENTRY(cpu_gdt_table)
12855+ .rept NR_CPUS
12856+ .quad 0x0000000000000000 /* NULL descriptor */
12857+ .quad 0x0000000000000000 /* 0x0b reserved */
12858+ .quad 0x0000000000000000 /* 0x13 reserved */
12859+ .quad 0x0000000000000000 /* 0x1b reserved */
12860+
12861+#ifdef CONFIG_PAX_KERNEXEC
12862+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12863+#else
12864+ .quad 0x0000000000000000 /* 0x20 unused */
12865+#endif
12866+
12867+ .quad 0x0000000000000000 /* 0x28 unused */
12868+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12869+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12870+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12871+ .quad 0x0000000000000000 /* 0x4b reserved */
12872+ .quad 0x0000000000000000 /* 0x53 reserved */
12873+ .quad 0x0000000000000000 /* 0x5b reserved */
12874+
12875+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12876+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12877+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12878+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12879+
12880+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12881+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12882+
12883+ /*
12884+ * Segments used for calling PnP BIOS have byte granularity.
12885+ * The code segments and data segments have fixed 64k limits,
12886+ * the transfer segment sizes are set at run time.
12887+ */
12888+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12889+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12890+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12891+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12892+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12893+
12894+ /*
12895+ * The APM segments have byte granularity and their bases
12896+ * are set at run time. All have 64k limits.
12897+ */
12898+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12899+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12900+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12901+
12902+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12903+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12904+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12905+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12906+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12907+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12908+
12909+ /* Be sure this is zeroed to avoid false validations in Xen */
12910+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12911+ .endr
12912diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12913--- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12914+++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12915@@ -19,6 +19,7 @@
12916 #include <asm/cache.h>
12917 #include <asm/processor-flags.h>
12918 #include <asm/percpu.h>
12919+#include <asm/cpufeature.h>
12920
12921 #ifdef CONFIG_PARAVIRT
12922 #include <asm/asm-offsets.h>
12923@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12924 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12925 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12926 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12927+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12928+L3_VMALLOC_START = pud_index(VMALLOC_START)
12929+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12930+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12931
12932 .text
12933 __HEAD
12934@@ -85,35 +90,22 @@ startup_64:
12935 */
12936 addq %rbp, init_level4_pgt + 0(%rip)
12937 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12938+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12939+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12940 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12941
12942 addq %rbp, level3_ident_pgt + 0(%rip)
12943+#ifndef CONFIG_XEN
12944+ addq %rbp, level3_ident_pgt + 8(%rip)
12945+#endif
12946
12947- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12948- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12949+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12950
12951- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12952+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12953+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12954
12955- /* Add an Identity mapping if I am above 1G */
12956- leaq _text(%rip), %rdi
12957- andq $PMD_PAGE_MASK, %rdi
12958-
12959- movq %rdi, %rax
12960- shrq $PUD_SHIFT, %rax
12961- andq $(PTRS_PER_PUD - 1), %rax
12962- jz ident_complete
12963-
12964- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12965- leaq level3_ident_pgt(%rip), %rbx
12966- movq %rdx, 0(%rbx, %rax, 8)
12967-
12968- movq %rdi, %rax
12969- shrq $PMD_SHIFT, %rax
12970- andq $(PTRS_PER_PMD - 1), %rax
12971- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12972- leaq level2_spare_pgt(%rip), %rbx
12973- movq %rdx, 0(%rbx, %rax, 8)
12974-ident_complete:
12975+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12976+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12977
12978 /*
12979 * Fixup the kernel text+data virtual addresses. Note that
12980@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12981 * after the boot processor executes this code.
12982 */
12983
12984- /* Enable PAE mode and PGE */
12985- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12986+ /* Enable PAE mode and PSE/PGE */
12987+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12988 movq %rax, %cr4
12989
12990 /* Setup early boot stage 4 level pagetables. */
12991@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12992 movl $MSR_EFER, %ecx
12993 rdmsr
12994 btsl $_EFER_SCE, %eax /* Enable System Call */
12995- btl $20,%edi /* No Execute supported? */
12996+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12997 jnc 1f
12998 btsl $_EFER_NX, %eax
12999+ leaq init_level4_pgt(%rip), %rdi
13000+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13001+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13002+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13003+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13004 1: wrmsr /* Make changes effective */
13005
13006 /* Setup cr0 */
13007@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13008 bad_address:
13009 jmp bad_address
13010
13011- .section ".init.text","ax"
13012+ __INIT
13013 #ifdef CONFIG_EARLY_PRINTK
13014 .globl early_idt_handlers
13015 early_idt_handlers:
13016@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13017 #endif /* EARLY_PRINTK */
13018 1: hlt
13019 jmp 1b
13020+ .previous
13021
13022 #ifdef CONFIG_EARLY_PRINTK
13023+ __INITDATA
13024 early_recursion_flag:
13025 .long 0
13026+ .previous
13027
13028+ .section .rodata,"a",@progbits
13029 early_idt_msg:
13030 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13031 early_idt_ripmsg:
13032 .asciz "RIP %s\n"
13033-#endif /* CONFIG_EARLY_PRINTK */
13034 .previous
13035+#endif /* CONFIG_EARLY_PRINTK */
13036
13037+ .section .rodata,"a",@progbits
13038 #define NEXT_PAGE(name) \
13039 .balign PAGE_SIZE; \
13040 ENTRY(name)
13041@@ -338,7 +340,6 @@ ENTRY(name)
13042 i = i + 1 ; \
13043 .endr
13044
13045- .data
13046 /*
13047 * This default setting generates an ident mapping at address 0x100000
13048 * and a mapping for the kernel that precisely maps virtual address
13049@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13050 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13051 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13052 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13053+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13054+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13055+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13056+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13057 .org init_level4_pgt + L4_START_KERNEL*8, 0
13058 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13059 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13060
13061+#ifdef CONFIG_PAX_PER_CPU_PGD
13062+NEXT_PAGE(cpu_pgd)
13063+ .rept NR_CPUS
13064+ .fill 512,8,0
13065+ .endr
13066+#endif
13067+
13068 NEXT_PAGE(level3_ident_pgt)
13069 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13070+#ifdef CONFIG_XEN
13071 .fill 511,8,0
13072+#else
13073+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13074+ .fill 510,8,0
13075+#endif
13076+
13077+NEXT_PAGE(level3_vmalloc_pgt)
13078+ .fill 512,8,0
13079+
13080+NEXT_PAGE(level3_vmemmap_pgt)
13081+ .fill L3_VMEMMAP_START,8,0
13082+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13083
13084 NEXT_PAGE(level3_kernel_pgt)
13085 .fill L3_START_KERNEL,8,0
13086@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13087 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13088 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13089
13090+NEXT_PAGE(level2_vmemmap_pgt)
13091+ .fill 512,8,0
13092+
13093 NEXT_PAGE(level2_fixmap_pgt)
13094- .fill 506,8,0
13095- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13096- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13097- .fill 5,8,0
13098+ .fill 507,8,0
13099+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13100+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13101+ .fill 4,8,0
13102
13103-NEXT_PAGE(level1_fixmap_pgt)
13104+NEXT_PAGE(level1_vsyscall_pgt)
13105 .fill 512,8,0
13106
13107-NEXT_PAGE(level2_ident_pgt)
13108- /* Since I easily can, map the first 1G.
13109+ /* Since I easily can, map the first 2G.
13110 * Don't set NX because code runs from these pages.
13111 */
13112- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13113+NEXT_PAGE(level2_ident_pgt)
13114+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13115
13116 NEXT_PAGE(level2_kernel_pgt)
13117 /*
13118@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13119 * If you want to increase this then increase MODULES_VADDR
13120 * too.)
13121 */
13122- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13123- KERNEL_IMAGE_SIZE/PMD_SIZE)
13124-
13125-NEXT_PAGE(level2_spare_pgt)
13126- .fill 512, 8, 0
13127+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13128
13129 #undef PMDS
13130 #undef NEXT_PAGE
13131
13132- .data
13133+ .align PAGE_SIZE
13134+ENTRY(cpu_gdt_table)
13135+ .rept NR_CPUS
13136+ .quad 0x0000000000000000 /* NULL descriptor */
13137+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13138+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13139+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13140+ .quad 0x00cffb000000ffff /* __USER32_CS */
13141+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13142+ .quad 0x00affb000000ffff /* __USER_CS */
13143+
13144+#ifdef CONFIG_PAX_KERNEXEC
13145+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13146+#else
13147+ .quad 0x0 /* unused */
13148+#endif
13149+
13150+ .quad 0,0 /* TSS */
13151+ .quad 0,0 /* LDT */
13152+ .quad 0,0,0 /* three TLS descriptors */
13153+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13154+ /* asm/segment.h:GDT_ENTRIES must match this */
13155+
13156+ /* zero the remaining page */
13157+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13158+ .endr
13159+
13160 .align 16
13161 .globl early_gdt_descr
13162 early_gdt_descr:
13163 .word GDT_ENTRIES*8-1
13164 early_gdt_descr_base:
13165- .quad INIT_PER_CPU_VAR(gdt_page)
13166+ .quad cpu_gdt_table
13167
13168 ENTRY(phys_base)
13169 /* This must match the first entry in level2_kernel_pgt */
13170 .quad 0x0000000000000000
13171
13172 #include "../../x86/xen/xen-head.S"
13173-
13174- .section .bss, "aw", @nobits
13175+
13176+ .section .rodata,"a",@progbits
13177 .align L1_CACHE_BYTES
13178 ENTRY(idt_table)
13179- .skip IDT_ENTRIES * 16
13180+ .fill 512,8,0
13181
13182 __PAGE_ALIGNED_BSS
13183 .align PAGE_SIZE
13184diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13185--- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13186+++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13187@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13188 EXPORT_SYMBOL(cmpxchg8b_emu);
13189 #endif
13190
13191+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13192+
13193 /* Networking helper routines. */
13194 EXPORT_SYMBOL(csum_partial_copy_generic);
13195+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13196+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13197
13198 EXPORT_SYMBOL(__get_user_1);
13199 EXPORT_SYMBOL(__get_user_2);
13200@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13201
13202 EXPORT_SYMBOL(csum_partial);
13203 EXPORT_SYMBOL(empty_zero_page);
13204+
13205+#ifdef CONFIG_PAX_KERNEXEC
13206+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13207+#endif
13208diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13209--- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13210+++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13211@@ -210,7 +210,7 @@ spurious_8259A_irq:
13212 "spurious 8259A interrupt: IRQ%d.\n", irq);
13213 spurious_irq_mask |= irqmask;
13214 }
13215- atomic_inc(&irq_err_count);
13216+ atomic_inc_unchecked(&irq_err_count);
13217 /*
13218 * Theoretically we do not have to handle this IRQ,
13219 * but in Linux this does not cause problems and is
13220diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13221--- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13222+++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13223@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13224 * way process stacks are handled. This is done by having a special
13225 * "init_task" linker map entry..
13226 */
13227-union thread_union init_thread_union __init_task_data =
13228- { INIT_THREAD_INFO(init_task) };
13229+union thread_union init_thread_union __init_task_data;
13230
13231 /*
13232 * Initial task structure.
13233@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13234 * section. Since TSS's are completely CPU-local, we want them
13235 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13236 */
13237-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13238-
13239+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13240+EXPORT_SYMBOL(init_tss);
13241diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13242--- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13243+++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13244@@ -6,6 +6,7 @@
13245 #include <linux/sched.h>
13246 #include <linux/kernel.h>
13247 #include <linux/capability.h>
13248+#include <linux/security.h>
13249 #include <linux/errno.h>
13250 #include <linux/types.h>
13251 #include <linux/ioport.h>
13252@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13253
13254 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13255 return -EINVAL;
13256+#ifdef CONFIG_GRKERNSEC_IO
13257+ if (turn_on && grsec_disable_privio) {
13258+ gr_handle_ioperm();
13259+ return -EPERM;
13260+ }
13261+#endif
13262 if (turn_on && !capable(CAP_SYS_RAWIO))
13263 return -EPERM;
13264
13265@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13266 * because the ->io_bitmap_max value must match the bitmap
13267 * contents:
13268 */
13269- tss = &per_cpu(init_tss, get_cpu());
13270+ tss = init_tss + get_cpu();
13271
13272 if (turn_on)
13273 bitmap_clear(t->io_bitmap_ptr, from, num);
13274@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13275 return -EINVAL;
13276 /* Trying to gain more privileges? */
13277 if (level > old) {
13278+#ifdef CONFIG_GRKERNSEC_IO
13279+ if (grsec_disable_privio) {
13280+ gr_handle_iopl();
13281+ return -EPERM;
13282+ }
13283+#endif
13284 if (!capable(CAP_SYS_RAWIO))
13285 return -EPERM;
13286 }
13287diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13288--- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13289+++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13290@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13291 __asm__ __volatile__("andl %%esp,%0" :
13292 "=r" (sp) : "0" (THREAD_SIZE - 1));
13293
13294- return sp < (sizeof(struct thread_info) + STACK_WARN);
13295+ return sp < STACK_WARN;
13296 }
13297
13298 static void print_stack_overflow(void)
13299@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13300 * per-CPU IRQ handling contexts (thread information and stack)
13301 */
13302 union irq_ctx {
13303- struct thread_info tinfo;
13304- u32 stack[THREAD_SIZE/sizeof(u32)];
13305+ unsigned long previous_esp;
13306+ u32 stack[THREAD_SIZE/sizeof(u32)];
13307 } __attribute__((aligned(THREAD_SIZE)));
13308
13309 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13310@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13311 static inline int
13312 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13313 {
13314- union irq_ctx *curctx, *irqctx;
13315+ union irq_ctx *irqctx;
13316 u32 *isp, arg1, arg2;
13317
13318- curctx = (union irq_ctx *) current_thread_info();
13319 irqctx = __this_cpu_read(hardirq_ctx);
13320
13321 /*
13322@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13323 * handler) we can't do that and just have to keep using the
13324 * current stack (which is the irq stack already after all)
13325 */
13326- if (unlikely(curctx == irqctx))
13327+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13328 return 0;
13329
13330 /* build the stack frame on the IRQ stack */
13331- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13332- irqctx->tinfo.task = curctx->tinfo.task;
13333- irqctx->tinfo.previous_esp = current_stack_pointer;
13334+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13335+ irqctx->previous_esp = current_stack_pointer;
13336
13337- /*
13338- * Copy the softirq bits in preempt_count so that the
13339- * softirq checks work in the hardirq context.
13340- */
13341- irqctx->tinfo.preempt_count =
13342- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13343- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13344+#ifdef CONFIG_PAX_MEMORY_UDEREF
13345+ __set_fs(MAKE_MM_SEG(0));
13346+#endif
13347
13348 if (unlikely(overflow))
13349 call_on_stack(print_stack_overflow, isp);
13350@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13351 : "0" (irq), "1" (desc), "2" (isp),
13352 "D" (desc->handle_irq)
13353 : "memory", "cc", "ecx");
13354+
13355+#ifdef CONFIG_PAX_MEMORY_UDEREF
13356+ __set_fs(current_thread_info()->addr_limit);
13357+#endif
13358+
13359 return 1;
13360 }
13361
13362@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13363 */
13364 void __cpuinit irq_ctx_init(int cpu)
13365 {
13366- union irq_ctx *irqctx;
13367-
13368 if (per_cpu(hardirq_ctx, cpu))
13369 return;
13370
13371- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13372- THREAD_FLAGS,
13373- THREAD_ORDER));
13374- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13375- irqctx->tinfo.cpu = cpu;
13376- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13377- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13378-
13379- per_cpu(hardirq_ctx, cpu) = irqctx;
13380-
13381- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13382- THREAD_FLAGS,
13383- THREAD_ORDER));
13384- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13385- irqctx->tinfo.cpu = cpu;
13386- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13387-
13388- per_cpu(softirq_ctx, cpu) = irqctx;
13389+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13390+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13391
13392 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13393 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13394@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13395 asmlinkage void do_softirq(void)
13396 {
13397 unsigned long flags;
13398- struct thread_info *curctx;
13399 union irq_ctx *irqctx;
13400 u32 *isp;
13401
13402@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13403 local_irq_save(flags);
13404
13405 if (local_softirq_pending()) {
13406- curctx = current_thread_info();
13407 irqctx = __this_cpu_read(softirq_ctx);
13408- irqctx->tinfo.task = curctx->task;
13409- irqctx->tinfo.previous_esp = current_stack_pointer;
13410+ irqctx->previous_esp = current_stack_pointer;
13411
13412 /* build the stack frame on the softirq stack */
13413- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13414+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13415+
13416+#ifdef CONFIG_PAX_MEMORY_UDEREF
13417+ __set_fs(MAKE_MM_SEG(0));
13418+#endif
13419
13420 call_on_stack(__do_softirq, isp);
13421+
13422+#ifdef CONFIG_PAX_MEMORY_UDEREF
13423+ __set_fs(current_thread_info()->addr_limit);
13424+#endif
13425+
13426 /*
13427 * Shouldn't happen, we returned above if in_interrupt():
13428 */
13429diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13430--- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13431+++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13432@@ -17,7 +17,7 @@
13433 #include <asm/mce.h>
13434 #include <asm/hw_irq.h>
13435
13436-atomic_t irq_err_count;
13437+atomic_unchecked_t irq_err_count;
13438
13439 /* Function pointer for generic interrupt vector handling */
13440 void (*x86_platform_ipi_callback)(void) = NULL;
13441@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13442 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13443 seq_printf(p, " Machine check polls\n");
13444 #endif
13445- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13446+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13447 #if defined(CONFIG_X86_IO_APIC)
13448- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13449+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13450 #endif
13451 return 0;
13452 }
13453@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13454
13455 u64 arch_irq_stat(void)
13456 {
13457- u64 sum = atomic_read(&irq_err_count);
13458+ u64 sum = atomic_read_unchecked(&irq_err_count);
13459
13460 #ifdef CONFIG_X86_IO_APIC
13461- sum += atomic_read(&irq_mis_count);
13462+ sum += atomic_read_unchecked(&irq_mis_count);
13463 #endif
13464 return sum;
13465 }
13466diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13467--- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13468+++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13469@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13470 #ifdef CONFIG_X86_32
13471 switch (regno) {
13472 case GDB_SS:
13473- if (!user_mode_vm(regs))
13474+ if (!user_mode(regs))
13475 *(unsigned long *)mem = __KERNEL_DS;
13476 break;
13477 case GDB_SP:
13478- if (!user_mode_vm(regs))
13479+ if (!user_mode(regs))
13480 *(unsigned long *)mem = kernel_stack_pointer(regs);
13481 break;
13482 case GDB_GS:
13483@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13484 case 'k':
13485 /* clear the trace bit */
13486 linux_regs->flags &= ~X86_EFLAGS_TF;
13487- atomic_set(&kgdb_cpu_doing_single_step, -1);
13488+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13489
13490 /* set the trace bit if we're stepping */
13491 if (remcomInBuffer[0] == 's') {
13492 linux_regs->flags |= X86_EFLAGS_TF;
13493- atomic_set(&kgdb_cpu_doing_single_step,
13494+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13495 raw_smp_processor_id());
13496 }
13497
13498@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13499 return NOTIFY_DONE;
13500
13501 case DIE_DEBUG:
13502- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13503+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13504 if (user_mode(regs))
13505 return single_step_cont(regs, args);
13506 break;
13507diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13508--- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13509+++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13510@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13511 } __attribute__((packed)) *insn;
13512
13513 insn = (struct __arch_relative_insn *)from;
13514+
13515+ pax_open_kernel();
13516 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13517 insn->op = op;
13518+ pax_close_kernel();
13519 }
13520
13521 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13522@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13523 kprobe_opcode_t opcode;
13524 kprobe_opcode_t *orig_opcodes = opcodes;
13525
13526- if (search_exception_tables((unsigned long)opcodes))
13527+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13528 return 0; /* Page fault may occur on this address. */
13529
13530 retry:
13531@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13532 }
13533 }
13534 insn_get_length(&insn);
13535+ pax_open_kernel();
13536 memcpy(dest, insn.kaddr, insn.length);
13537+ pax_close_kernel();
13538
13539 #ifdef CONFIG_X86_64
13540 if (insn_rip_relative(&insn)) {
13541@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13542 (u8 *) dest;
13543 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13544 disp = (u8 *) dest + insn_offset_displacement(&insn);
13545+ pax_open_kernel();
13546 *(s32 *) disp = (s32) newdisp;
13547+ pax_close_kernel();
13548 }
13549 #endif
13550 return insn.length;
13551@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13552 */
13553 __copy_instruction(p->ainsn.insn, p->addr, 0);
13554
13555- if (can_boost(p->addr))
13556+ if (can_boost(ktla_ktva(p->addr)))
13557 p->ainsn.boostable = 0;
13558 else
13559 p->ainsn.boostable = -1;
13560
13561- p->opcode = *p->addr;
13562+ p->opcode = *(ktla_ktva(p->addr));
13563 }
13564
13565 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13566@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13567 * nor set current_kprobe, because it doesn't use single
13568 * stepping.
13569 */
13570- regs->ip = (unsigned long)p->ainsn.insn;
13571+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13572 preempt_enable_no_resched();
13573 return;
13574 }
13575@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13576 if (p->opcode == BREAKPOINT_INSTRUCTION)
13577 regs->ip = (unsigned long)p->addr;
13578 else
13579- regs->ip = (unsigned long)p->ainsn.insn;
13580+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13581 }
13582
13583 /*
13584@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13585 setup_singlestep(p, regs, kcb, 0);
13586 return 1;
13587 }
13588- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13589+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13590 /*
13591 * The breakpoint instruction was removed right
13592 * after we hit it. Another cpu has removed
13593@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13594 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13595 {
13596 unsigned long *tos = stack_addr(regs);
13597- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13598+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13599 unsigned long orig_ip = (unsigned long)p->addr;
13600 kprobe_opcode_t *insn = p->ainsn.insn;
13601
13602@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13603 struct die_args *args = data;
13604 int ret = NOTIFY_DONE;
13605
13606- if (args->regs && user_mode_vm(args->regs))
13607+ if (args->regs && user_mode(args->regs))
13608 return ret;
13609
13610 switch (val) {
13611@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13612 * Verify if the address gap is in 2GB range, because this uses
13613 * a relative jump.
13614 */
13615- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13616+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13617 if (abs(rel) > 0x7fffffff)
13618 return -ERANGE;
13619
13620@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13621 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13622
13623 /* Set probe function call */
13624- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13625+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13626
13627 /* Set returning jmp instruction at the tail of out-of-line buffer */
13628 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13629- (u8 *)op->kp.addr + op->optinsn.size);
13630+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13631
13632 flush_icache_range((unsigned long) buf,
13633 (unsigned long) buf + TMPL_END_IDX +
13634@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13635 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13636
13637 /* Backup instructions which will be replaced by jump address */
13638- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13639+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13640 RELATIVE_ADDR_SIZE);
13641
13642 insn_buf[0] = RELATIVEJUMP_OPCODE;
13643diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13644--- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13645+++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13646@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13647 pv_mmu_ops.set_pud = kvm_set_pud;
13648 #if PAGETABLE_LEVELS == 4
13649 pv_mmu_ops.set_pgd = kvm_set_pgd;
13650+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13651 #endif
13652 #endif
13653 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13654diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13655--- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13656+++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13657@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13658 if (reload) {
13659 #ifdef CONFIG_SMP
13660 preempt_disable();
13661- load_LDT(pc);
13662+ load_LDT_nolock(pc);
13663 if (!cpumask_equal(mm_cpumask(current->mm),
13664 cpumask_of(smp_processor_id())))
13665 smp_call_function(flush_ldt, current->mm, 1);
13666 preempt_enable();
13667 #else
13668- load_LDT(pc);
13669+ load_LDT_nolock(pc);
13670 #endif
13671 }
13672 if (oldsize) {
13673@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13674 return err;
13675
13676 for (i = 0; i < old->size; i++)
13677- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13678+ write_ldt_entry(new->ldt, i, old->ldt + i);
13679 return 0;
13680 }
13681
13682@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13683 retval = copy_ldt(&mm->context, &old_mm->context);
13684 mutex_unlock(&old_mm->context.lock);
13685 }
13686+
13687+ if (tsk == current) {
13688+ mm->context.vdso = 0;
13689+
13690+#ifdef CONFIG_X86_32
13691+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13692+ mm->context.user_cs_base = 0UL;
13693+ mm->context.user_cs_limit = ~0UL;
13694+
13695+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13696+ cpus_clear(mm->context.cpu_user_cs_mask);
13697+#endif
13698+
13699+#endif
13700+#endif
13701+
13702+ }
13703+
13704 return retval;
13705 }
13706
13707@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13708 }
13709 }
13710
13711+#ifdef CONFIG_PAX_SEGMEXEC
13712+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13713+ error = -EINVAL;
13714+ goto out_unlock;
13715+ }
13716+#endif
13717+
13718 fill_ldt(&ldt, &ldt_info);
13719 if (oldmode)
13720 ldt.avl = 0;
13721diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13722--- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13723+++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13724@@ -27,7 +27,7 @@
13725 #include <asm/cacheflush.h>
13726 #include <asm/debugreg.h>
13727
13728-static void set_idt(void *newidt, __u16 limit)
13729+static void set_idt(struct desc_struct *newidt, __u16 limit)
13730 {
13731 struct desc_ptr curidt;
13732
13733@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13734 }
13735
13736
13737-static void set_gdt(void *newgdt, __u16 limit)
13738+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13739 {
13740 struct desc_ptr curgdt;
13741
13742@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13743 }
13744
13745 control_page = page_address(image->control_code_page);
13746- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13747+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13748
13749 relocate_kernel_ptr = control_page;
13750 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13751diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13752--- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13753+++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13754@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13755
13756 static int get_ucode_user(void *to, const void *from, size_t n)
13757 {
13758- return copy_from_user(to, from, n);
13759+ return copy_from_user(to, (__force const void __user *)from, n);
13760 }
13761
13762 static enum ucode_state
13763 request_microcode_user(int cpu, const void __user *buf, size_t size)
13764 {
13765- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13766+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13767 }
13768
13769 static void microcode_fini_cpu(int cpu)
13770diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13771--- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13772+++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13773@@ -36,21 +36,66 @@
13774 #define DEBUGP(fmt...)
13775 #endif
13776
13777-void *module_alloc(unsigned long size)
13778+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13779 {
13780 if (PAGE_ALIGN(size) > MODULES_LEN)
13781 return NULL;
13782 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13783- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13784+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13785 -1, __builtin_return_address(0));
13786 }
13787
13788+void *module_alloc(unsigned long size)
13789+{
13790+
13791+#ifdef CONFIG_PAX_KERNEXEC
13792+ return __module_alloc(size, PAGE_KERNEL);
13793+#else
13794+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13795+#endif
13796+
13797+}
13798+
13799 /* Free memory returned from module_alloc */
13800 void module_free(struct module *mod, void *module_region)
13801 {
13802 vfree(module_region);
13803 }
13804
13805+#ifdef CONFIG_PAX_KERNEXEC
13806+#ifdef CONFIG_X86_32
13807+void *module_alloc_exec(unsigned long size)
13808+{
13809+ struct vm_struct *area;
13810+
13811+ if (size == 0)
13812+ return NULL;
13813+
13814+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13815+ return area ? area->addr : NULL;
13816+}
13817+EXPORT_SYMBOL(module_alloc_exec);
13818+
13819+void module_free_exec(struct module *mod, void *module_region)
13820+{
13821+ vunmap(module_region);
13822+}
13823+EXPORT_SYMBOL(module_free_exec);
13824+#else
13825+void module_free_exec(struct module *mod, void *module_region)
13826+{
13827+ module_free(mod, module_region);
13828+}
13829+EXPORT_SYMBOL(module_free_exec);
13830+
13831+void *module_alloc_exec(unsigned long size)
13832+{
13833+ return __module_alloc(size, PAGE_KERNEL_RX);
13834+}
13835+EXPORT_SYMBOL(module_alloc_exec);
13836+#endif
13837+#endif
13838+
13839 /* We don't need anything special. */
13840 int module_frob_arch_sections(Elf_Ehdr *hdr,
13841 Elf_Shdr *sechdrs,
13842@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13843 unsigned int i;
13844 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13845 Elf32_Sym *sym;
13846- uint32_t *location;
13847+ uint32_t *plocation, location;
13848
13849 DEBUGP("Applying relocate section %u to %u\n", relsec,
13850 sechdrs[relsec].sh_info);
13851 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13852 /* This is where to make the change */
13853- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13854- + rel[i].r_offset;
13855+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13856+ location = (uint32_t)plocation;
13857+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13858+ plocation = ktla_ktva((void *)plocation);
13859 /* This is the symbol it is referring to. Note that all
13860 undefined symbols have been resolved. */
13861 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13862@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13863 switch (ELF32_R_TYPE(rel[i].r_info)) {
13864 case R_386_32:
13865 /* We add the value into the location given */
13866- *location += sym->st_value;
13867+ pax_open_kernel();
13868+ *plocation += sym->st_value;
13869+ pax_close_kernel();
13870 break;
13871 case R_386_PC32:
13872 /* Add the value, subtract its postition */
13873- *location += sym->st_value - (uint32_t)location;
13874+ pax_open_kernel();
13875+ *plocation += sym->st_value - location;
13876+ pax_close_kernel();
13877 break;
13878 default:
13879 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13880@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13881 case R_X86_64_NONE:
13882 break;
13883 case R_X86_64_64:
13884+ pax_open_kernel();
13885 *(u64 *)loc = val;
13886+ pax_close_kernel();
13887 break;
13888 case R_X86_64_32:
13889+ pax_open_kernel();
13890 *(u32 *)loc = val;
13891+ pax_close_kernel();
13892 if (val != *(u32 *)loc)
13893 goto overflow;
13894 break;
13895 case R_X86_64_32S:
13896+ pax_open_kernel();
13897 *(s32 *)loc = val;
13898+ pax_close_kernel();
13899 if ((s64)val != *(s32 *)loc)
13900 goto overflow;
13901 break;
13902 case R_X86_64_PC32:
13903 val -= (u64)loc;
13904+ pax_open_kernel();
13905 *(u32 *)loc = val;
13906+ pax_close_kernel();
13907+
13908 #if 0
13909 if ((s64)val != *(s32 *)loc)
13910 goto overflow;
13911diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13912--- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13913+++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13914@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13915 {
13916 return x;
13917 }
13918+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13919+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13920+#endif
13921
13922 void __init default_banner(void)
13923 {
13924@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13925 * corresponding structure. */
13926 static void *get_call_destination(u8 type)
13927 {
13928- struct paravirt_patch_template tmpl = {
13929+ const struct paravirt_patch_template tmpl = {
13930 .pv_init_ops = pv_init_ops,
13931 .pv_time_ops = pv_time_ops,
13932 .pv_cpu_ops = pv_cpu_ops,
13933@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13934 .pv_lock_ops = pv_lock_ops,
13935 #endif
13936 };
13937+
13938+ pax_track_stack();
13939+
13940 return *((void **)&tmpl + type);
13941 }
13942
13943@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13944 if (opfunc == NULL)
13945 /* If there's no function, patch it with a ud2a (BUG) */
13946 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13947- else if (opfunc == _paravirt_nop)
13948+ else if (opfunc == (void *)_paravirt_nop)
13949 /* If the operation is a nop, then nop the callsite */
13950 ret = paravirt_patch_nop();
13951
13952 /* identity functions just return their single argument */
13953- else if (opfunc == _paravirt_ident_32)
13954+ else if (opfunc == (void *)_paravirt_ident_32)
13955 ret = paravirt_patch_ident_32(insnbuf, len);
13956- else if (opfunc == _paravirt_ident_64)
13957+ else if (opfunc == (void *)_paravirt_ident_64)
13958 ret = paravirt_patch_ident_64(insnbuf, len);
13959+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13960+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13961+ ret = paravirt_patch_ident_64(insnbuf, len);
13962+#endif
13963
13964 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13965 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13966@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13967 if (insn_len > len || start == NULL)
13968 insn_len = len;
13969 else
13970- memcpy(insnbuf, start, insn_len);
13971+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13972
13973 return insn_len;
13974 }
13975@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13976 preempt_enable();
13977 }
13978
13979-struct pv_info pv_info = {
13980+struct pv_info pv_info __read_only = {
13981 .name = "bare hardware",
13982 .paravirt_enabled = 0,
13983 .kernel_rpl = 0,
13984 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13985 };
13986
13987-struct pv_init_ops pv_init_ops = {
13988+struct pv_init_ops pv_init_ops __read_only = {
13989 .patch = native_patch,
13990 };
13991
13992-struct pv_time_ops pv_time_ops = {
13993+struct pv_time_ops pv_time_ops __read_only = {
13994 .sched_clock = native_sched_clock,
13995 };
13996
13997-struct pv_irq_ops pv_irq_ops = {
13998+struct pv_irq_ops pv_irq_ops __read_only = {
13999 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14000 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14001 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14002@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14003 #endif
14004 };
14005
14006-struct pv_cpu_ops pv_cpu_ops = {
14007+struct pv_cpu_ops pv_cpu_ops __read_only = {
14008 .cpuid = native_cpuid,
14009 .get_debugreg = native_get_debugreg,
14010 .set_debugreg = native_set_debugreg,
14011@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14012 .end_context_switch = paravirt_nop,
14013 };
14014
14015-struct pv_apic_ops pv_apic_ops = {
14016+struct pv_apic_ops pv_apic_ops __read_only = {
14017 #ifdef CONFIG_X86_LOCAL_APIC
14018 .startup_ipi_hook = paravirt_nop,
14019 #endif
14020 };
14021
14022-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14023+#ifdef CONFIG_X86_32
14024+#ifdef CONFIG_X86_PAE
14025+/* 64-bit pagetable entries */
14026+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14027+#else
14028 /* 32-bit pagetable entries */
14029 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14030+#endif
14031 #else
14032 /* 64-bit pagetable entries */
14033 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14034 #endif
14035
14036-struct pv_mmu_ops pv_mmu_ops = {
14037+struct pv_mmu_ops pv_mmu_ops __read_only = {
14038
14039 .read_cr2 = native_read_cr2,
14040 .write_cr2 = native_write_cr2,
14041@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14042 .make_pud = PTE_IDENT,
14043
14044 .set_pgd = native_set_pgd,
14045+ .set_pgd_batched = native_set_pgd_batched,
14046 #endif
14047 #endif /* PAGETABLE_LEVELS >= 3 */
14048
14049@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14050 },
14051
14052 .set_fixmap = native_set_fixmap,
14053+
14054+#ifdef CONFIG_PAX_KERNEXEC
14055+ .pax_open_kernel = native_pax_open_kernel,
14056+ .pax_close_kernel = native_pax_close_kernel,
14057+#endif
14058+
14059 };
14060
14061 EXPORT_SYMBOL_GPL(pv_time_ops);
14062diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
14063--- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14064+++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14065@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14066 arch_spin_lock(lock);
14067 }
14068
14069-struct pv_lock_ops pv_lock_ops = {
14070+struct pv_lock_ops pv_lock_ops __read_only = {
14071 #ifdef CONFIG_SMP
14072 .spin_is_locked = __ticket_spin_is_locked,
14073 .spin_is_contended = __ticket_spin_is_contended,
14074diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14075--- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14076+++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14077@@ -2,7 +2,7 @@
14078 #include <asm/iommu_table.h>
14079 #include <linux/string.h>
14080 #include <linux/kallsyms.h>
14081-
14082+#include <linux/sched.h>
14083
14084 #define DEBUG 1
14085
14086@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14087 {
14088 struct iommu_table_entry *p, *q, *x;
14089
14090+ pax_track_stack();
14091+
14092 /* Simple cyclic dependency checker. */
14093 for (p = start; p < finish; p++) {
14094 q = find_dependents_of(start, finish, p);
14095diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14096--- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14097+++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14098@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14099 unsigned long thread_saved_pc(struct task_struct *tsk)
14100 {
14101 return ((unsigned long *)tsk->thread.sp)[3];
14102+//XXX return tsk->thread.eip;
14103 }
14104
14105 #ifndef CONFIG_SMP
14106@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14107 unsigned long sp;
14108 unsigned short ss, gs;
14109
14110- if (user_mode_vm(regs)) {
14111+ if (user_mode(regs)) {
14112 sp = regs->sp;
14113 ss = regs->ss & 0xffff;
14114- gs = get_user_gs(regs);
14115 } else {
14116 sp = kernel_stack_pointer(regs);
14117 savesegment(ss, ss);
14118- savesegment(gs, gs);
14119 }
14120+ gs = get_user_gs(regs);
14121
14122 show_regs_common();
14123
14124@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14125 struct task_struct *tsk;
14126 int err;
14127
14128- childregs = task_pt_regs(p);
14129+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14130 *childregs = *regs;
14131 childregs->ax = 0;
14132 childregs->sp = sp;
14133
14134 p->thread.sp = (unsigned long) childregs;
14135 p->thread.sp0 = (unsigned long) (childregs+1);
14136+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14137
14138 p->thread.ip = (unsigned long) ret_from_fork;
14139
14140@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14141 struct thread_struct *prev = &prev_p->thread,
14142 *next = &next_p->thread;
14143 int cpu = smp_processor_id();
14144- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14145+ struct tss_struct *tss = init_tss + cpu;
14146 bool preload_fpu;
14147
14148 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14149@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14150 */
14151 lazy_save_gs(prev->gs);
14152
14153+#ifdef CONFIG_PAX_MEMORY_UDEREF
14154+ __set_fs(task_thread_info(next_p)->addr_limit);
14155+#endif
14156+
14157 /*
14158 * Load the per-thread Thread-Local Storage descriptor.
14159 */
14160@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14161 */
14162 arch_end_context_switch(next_p);
14163
14164+ percpu_write(current_task, next_p);
14165+ percpu_write(current_tinfo, &next_p->tinfo);
14166+
14167 if (preload_fpu)
14168 __math_state_restore();
14169
14170@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14171 if (prev->gs | next->gs)
14172 lazy_load_gs(next->gs);
14173
14174- percpu_write(current_task, next_p);
14175-
14176 return prev_p;
14177 }
14178
14179@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14180 } while (count++ < 16);
14181 return 0;
14182 }
14183-
14184diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14185--- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14186+++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14187@@ -87,7 +87,7 @@ static void __exit_idle(void)
14188 void exit_idle(void)
14189 {
14190 /* idle loop has pid 0 */
14191- if (current->pid)
14192+ if (task_pid_nr(current))
14193 return;
14194 __exit_idle();
14195 }
14196@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14197 struct pt_regs *childregs;
14198 struct task_struct *me = current;
14199
14200- childregs = ((struct pt_regs *)
14201- (THREAD_SIZE + task_stack_page(p))) - 1;
14202+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14203 *childregs = *regs;
14204
14205 childregs->ax = 0;
14206@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14207 p->thread.sp = (unsigned long) childregs;
14208 p->thread.sp0 = (unsigned long) (childregs+1);
14209 p->thread.usersp = me->thread.usersp;
14210+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14211
14212 set_tsk_thread_flag(p, TIF_FORK);
14213
14214@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14215 struct thread_struct *prev = &prev_p->thread;
14216 struct thread_struct *next = &next_p->thread;
14217 int cpu = smp_processor_id();
14218- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14219+ struct tss_struct *tss = init_tss + cpu;
14220 unsigned fsindex, gsindex;
14221 bool preload_fpu;
14222
14223@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14224 prev->usersp = percpu_read(old_rsp);
14225 percpu_write(old_rsp, next->usersp);
14226 percpu_write(current_task, next_p);
14227+ percpu_write(current_tinfo, &next_p->tinfo);
14228
14229- percpu_write(kernel_stack,
14230- (unsigned long)task_stack_page(next_p) +
14231- THREAD_SIZE - KERNEL_STACK_OFFSET);
14232+ percpu_write(kernel_stack, next->sp0);
14233
14234 /*
14235 * Now maybe reload the debug registers and handle I/O bitmaps
14236@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14237 if (!p || p == current || p->state == TASK_RUNNING)
14238 return 0;
14239 stack = (unsigned long)task_stack_page(p);
14240- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14241+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14242 return 0;
14243 fp = *(u64 *)(p->thread.sp);
14244 do {
14245- if (fp < (unsigned long)stack ||
14246- fp >= (unsigned long)stack+THREAD_SIZE)
14247+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14248 return 0;
14249 ip = *(u64 *)(fp+8);
14250 if (!in_sched_functions(ip))
14251diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14252--- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14253+++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14254@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14255
14256 void free_thread_info(struct thread_info *ti)
14257 {
14258- free_thread_xstate(ti->task);
14259 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14260 }
14261
14262+static struct kmem_cache *task_struct_cachep;
14263+
14264 void arch_task_cache_init(void)
14265 {
14266- task_xstate_cachep =
14267- kmem_cache_create("task_xstate", xstate_size,
14268+ /* create a slab on which task_structs can be allocated */
14269+ task_struct_cachep =
14270+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14271+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14272+
14273+ task_xstate_cachep =
14274+ kmem_cache_create("task_xstate", xstate_size,
14275 __alignof__(union thread_xstate),
14276- SLAB_PANIC | SLAB_NOTRACK, NULL);
14277+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14278+}
14279+
14280+struct task_struct *alloc_task_struct_node(int node)
14281+{
14282+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14283+}
14284+
14285+void free_task_struct(struct task_struct *task)
14286+{
14287+ free_thread_xstate(task);
14288+ kmem_cache_free(task_struct_cachep, task);
14289 }
14290
14291 /*
14292@@ -70,7 +87,7 @@ void exit_thread(void)
14293 unsigned long *bp = t->io_bitmap_ptr;
14294
14295 if (bp) {
14296- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14297+ struct tss_struct *tss = init_tss + get_cpu();
14298
14299 t->io_bitmap_ptr = NULL;
14300 clear_thread_flag(TIF_IO_BITMAP);
14301@@ -106,7 +123,7 @@ void show_regs_common(void)
14302
14303 printk(KERN_CONT "\n");
14304 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14305- current->pid, current->comm, print_tainted(),
14306+ task_pid_nr(current), current->comm, print_tainted(),
14307 init_utsname()->release,
14308 (int)strcspn(init_utsname()->version, " "),
14309 init_utsname()->version);
14310@@ -120,6 +137,9 @@ void flush_thread(void)
14311 {
14312 struct task_struct *tsk = current;
14313
14314+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14315+ loadsegment(gs, 0);
14316+#endif
14317 flush_ptrace_hw_breakpoint(tsk);
14318 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14319 /*
14320@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14321 regs.di = (unsigned long) arg;
14322
14323 #ifdef CONFIG_X86_32
14324- regs.ds = __USER_DS;
14325- regs.es = __USER_DS;
14326+ regs.ds = __KERNEL_DS;
14327+ regs.es = __KERNEL_DS;
14328 regs.fs = __KERNEL_PERCPU;
14329- regs.gs = __KERNEL_STACK_CANARY;
14330+ savesegment(gs, regs.gs);
14331 #else
14332 regs.ss = __KERNEL_DS;
14333 #endif
14334@@ -403,7 +423,7 @@ void default_idle(void)
14335 EXPORT_SYMBOL(default_idle);
14336 #endif
14337
14338-void stop_this_cpu(void *dummy)
14339+__noreturn void stop_this_cpu(void *dummy)
14340 {
14341 local_irq_disable();
14342 /*
14343@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14344 }
14345 early_param("idle", idle_setup);
14346
14347-unsigned long arch_align_stack(unsigned long sp)
14348+#ifdef CONFIG_PAX_RANDKSTACK
14349+void pax_randomize_kstack(struct pt_regs *regs)
14350 {
14351- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14352- sp -= get_random_int() % 8192;
14353- return sp & ~0xf;
14354-}
14355+ struct thread_struct *thread = &current->thread;
14356+ unsigned long time;
14357
14358-unsigned long arch_randomize_brk(struct mm_struct *mm)
14359-{
14360- unsigned long range_end = mm->brk + 0x02000000;
14361- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14362-}
14363+ if (!randomize_va_space)
14364+ return;
14365+
14366+ if (v8086_mode(regs))
14367+ return;
14368
14369+ rdtscl(time);
14370+
14371+ /* P4 seems to return a 0 LSB, ignore it */
14372+#ifdef CONFIG_MPENTIUM4
14373+ time &= 0x3EUL;
14374+ time <<= 2;
14375+#elif defined(CONFIG_X86_64)
14376+ time &= 0xFUL;
14377+ time <<= 4;
14378+#else
14379+ time &= 0x1FUL;
14380+ time <<= 3;
14381+#endif
14382+
14383+ thread->sp0 ^= time;
14384+ load_sp0(init_tss + smp_processor_id(), thread);
14385+
14386+#ifdef CONFIG_X86_64
14387+ percpu_write(kernel_stack, thread->sp0);
14388+#endif
14389+}
14390+#endif
14391diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14392--- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14393+++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14394@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14395 unsigned long addr, unsigned long data)
14396 {
14397 int ret;
14398- unsigned long __user *datap = (unsigned long __user *)data;
14399+ unsigned long __user *datap = (__force unsigned long __user *)data;
14400
14401 switch (request) {
14402 /* read the word at location addr in the USER area. */
14403@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14404 if ((int) addr < 0)
14405 return -EIO;
14406 ret = do_get_thread_area(child, addr,
14407- (struct user_desc __user *)data);
14408+ (__force struct user_desc __user *) data);
14409 break;
14410
14411 case PTRACE_SET_THREAD_AREA:
14412 if ((int) addr < 0)
14413 return -EIO;
14414 ret = do_set_thread_area(child, addr,
14415- (struct user_desc __user *)data, 0);
14416+ (__force struct user_desc __user *) data, 0);
14417 break;
14418 #endif
14419
14420@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14421 memset(info, 0, sizeof(*info));
14422 info->si_signo = SIGTRAP;
14423 info->si_code = si_code;
14424- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14425+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14426 }
14427
14428 void user_single_step_siginfo(struct task_struct *tsk,
14429diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14430--- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14431+++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14432@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14433 return pv_tsc_khz;
14434 }
14435
14436-static atomic64_t last_value = ATOMIC64_INIT(0);
14437+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14438
14439 void pvclock_resume(void)
14440 {
14441- atomic64_set(&last_value, 0);
14442+ atomic64_set_unchecked(&last_value, 0);
14443 }
14444
14445 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14446@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14447 * updating at the same time, and one of them could be slightly behind,
14448 * making the assumption that last_value always go forward fail to hold.
14449 */
14450- last = atomic64_read(&last_value);
14451+ last = atomic64_read_unchecked(&last_value);
14452 do {
14453 if (ret < last)
14454 return last;
14455- last = atomic64_cmpxchg(&last_value, last, ret);
14456+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14457 } while (unlikely(last != ret));
14458
14459 return ret;
14460diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14461--- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14462+++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14463@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14464 EXPORT_SYMBOL(pm_power_off);
14465
14466 static const struct desc_ptr no_idt = {};
14467-static int reboot_mode;
14468+static unsigned short reboot_mode;
14469 enum reboot_type reboot_type = BOOT_ACPI;
14470 int reboot_force;
14471
14472@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14473 extern const unsigned char machine_real_restart_asm[];
14474 extern const u64 machine_real_restart_gdt[3];
14475
14476-void machine_real_restart(unsigned int type)
14477+__noreturn void machine_real_restart(unsigned int type)
14478 {
14479 void *restart_va;
14480 unsigned long restart_pa;
14481- void (*restart_lowmem)(unsigned int);
14482+ void (* __noreturn restart_lowmem)(unsigned int);
14483 u64 *lowmem_gdt;
14484
14485+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14486+ struct desc_struct *gdt;
14487+#endif
14488+
14489 local_irq_disable();
14490
14491 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14492@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14493 boot)". This seems like a fairly standard thing that gets set by
14494 REBOOT.COM programs, and the previous reset routine did this
14495 too. */
14496- *((unsigned short *)0x472) = reboot_mode;
14497+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14498
14499 /* Patch the GDT in the low memory trampoline */
14500 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14501
14502 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14503 restart_pa = virt_to_phys(restart_va);
14504- restart_lowmem = (void (*)(unsigned int))restart_pa;
14505+ restart_lowmem = (void *)restart_pa;
14506
14507 /* GDT[0]: GDT self-pointer */
14508 lowmem_gdt[0] =
14509@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14510 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14511
14512 /* Jump to the identity-mapped low memory code */
14513+
14514+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14515+ gdt = get_cpu_gdt_table(smp_processor_id());
14516+ pax_open_kernel();
14517+#ifdef CONFIG_PAX_MEMORY_UDEREF
14518+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14519+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14520+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14521+#endif
14522+#ifdef CONFIG_PAX_KERNEXEC
14523+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14524+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14525+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14526+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14527+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14528+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14529+#endif
14530+ pax_close_kernel();
14531+#endif
14532+
14533+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14534+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14535+ unreachable();
14536+#else
14537 restart_lowmem(type);
14538+#endif
14539+
14540 }
14541 #ifdef CONFIG_APM_MODULE
14542 EXPORT_SYMBOL(machine_real_restart);
14543@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14544 * try to force a triple fault and then cycle between hitting the keyboard
14545 * controller and doing that
14546 */
14547-static void native_machine_emergency_restart(void)
14548+__noreturn static void native_machine_emergency_restart(void)
14549 {
14550 int i;
14551 int attempt = 0;
14552@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14553 #endif
14554 }
14555
14556-static void __machine_emergency_restart(int emergency)
14557+static __noreturn void __machine_emergency_restart(int emergency)
14558 {
14559 reboot_emergency = emergency;
14560 machine_ops.emergency_restart();
14561 }
14562
14563-static void native_machine_restart(char *__unused)
14564+static __noreturn void native_machine_restart(char *__unused)
14565 {
14566 printk("machine restart\n");
14567
14568@@ -662,7 +692,7 @@ static void native_machine_restart(char
14569 __machine_emergency_restart(0);
14570 }
14571
14572-static void native_machine_halt(void)
14573+static __noreturn void native_machine_halt(void)
14574 {
14575 /* stop other cpus and apics */
14576 machine_shutdown();
14577@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14578 stop_this_cpu(NULL);
14579 }
14580
14581-static void native_machine_power_off(void)
14582+__noreturn static void native_machine_power_off(void)
14583 {
14584 if (pm_power_off) {
14585 if (!reboot_force)
14586@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14587 }
14588 /* a fallback in case there is no PM info available */
14589 tboot_shutdown(TB_SHUTDOWN_HALT);
14590+ unreachable();
14591 }
14592
14593 struct machine_ops machine_ops = {
14594diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14595--- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14596+++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14597@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14598 * area (640->1Mb) as ram even though it is not.
14599 * take them out.
14600 */
14601- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14602+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14603 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14604 }
14605
14606@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14607
14608 if (!boot_params.hdr.root_flags)
14609 root_mountflags &= ~MS_RDONLY;
14610- init_mm.start_code = (unsigned long) _text;
14611- init_mm.end_code = (unsigned long) _etext;
14612+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14613+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14614 init_mm.end_data = (unsigned long) _edata;
14615 init_mm.brk = _brk_end;
14616
14617- code_resource.start = virt_to_phys(_text);
14618- code_resource.end = virt_to_phys(_etext)-1;
14619- data_resource.start = virt_to_phys(_etext);
14620+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14621+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14622+ data_resource.start = virt_to_phys(_sdata);
14623 data_resource.end = virt_to_phys(_edata)-1;
14624 bss_resource.start = virt_to_phys(&__bss_start);
14625 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14626diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14627--- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14628+++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14629@@ -21,19 +21,17 @@
14630 #include <asm/cpu.h>
14631 #include <asm/stackprotector.h>
14632
14633-DEFINE_PER_CPU(int, cpu_number);
14634+#ifdef CONFIG_SMP
14635+DEFINE_PER_CPU(unsigned int, cpu_number);
14636 EXPORT_PER_CPU_SYMBOL(cpu_number);
14637+#endif
14638
14639-#ifdef CONFIG_X86_64
14640 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14641-#else
14642-#define BOOT_PERCPU_OFFSET 0
14643-#endif
14644
14645 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14646 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14647
14648-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14649+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14650 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14651 };
14652 EXPORT_SYMBOL(__per_cpu_offset);
14653@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14654 {
14655 #ifdef CONFIG_X86_32
14656 struct desc_struct gdt;
14657+ unsigned long base = per_cpu_offset(cpu);
14658
14659- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14660- 0x2 | DESCTYPE_S, 0x8);
14661- gdt.s = 1;
14662+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14663+ 0x83 | DESCTYPE_S, 0xC);
14664 write_gdt_entry(get_cpu_gdt_table(cpu),
14665 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14666 #endif
14667@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14668 /* alrighty, percpu areas up and running */
14669 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14670 for_each_possible_cpu(cpu) {
14671+#ifdef CONFIG_CC_STACKPROTECTOR
14672+#ifdef CONFIG_X86_32
14673+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14674+#endif
14675+#endif
14676 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14677 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14678 per_cpu(cpu_number, cpu) = cpu;
14679@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14680 */
14681 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14682 #endif
14683+#ifdef CONFIG_CC_STACKPROTECTOR
14684+#ifdef CONFIG_X86_32
14685+ if (!cpu)
14686+ per_cpu(stack_canary.canary, cpu) = canary;
14687+#endif
14688+#endif
14689 /*
14690 * Up to this point, the boot CPU has been using .init.data
14691 * area. Reload any changed state for the boot CPU.
14692diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14693--- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14694+++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14695@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14696 * Align the stack pointer according to the i386 ABI,
14697 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14698 */
14699- sp = ((sp + 4) & -16ul) - 4;
14700+ sp = ((sp - 12) & -16ul) - 4;
14701 #else /* !CONFIG_X86_32 */
14702 sp = round_down(sp, 16) - 8;
14703 #endif
14704@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14705 * Return an always-bogus address instead so we will die with SIGSEGV.
14706 */
14707 if (onsigstack && !likely(on_sig_stack(sp)))
14708- return (void __user *)-1L;
14709+ return (__force void __user *)-1L;
14710
14711 /* save i387 state */
14712 if (used_math() && save_i387_xstate(*fpstate) < 0)
14713- return (void __user *)-1L;
14714+ return (__force void __user *)-1L;
14715
14716 return (void __user *)sp;
14717 }
14718@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14719 }
14720
14721 if (current->mm->context.vdso)
14722- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14723+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14724 else
14725- restorer = &frame->retcode;
14726+ restorer = (void __user *)&frame->retcode;
14727 if (ka->sa.sa_flags & SA_RESTORER)
14728 restorer = ka->sa.sa_restorer;
14729
14730@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14731 * reasons and because gdb uses it as a signature to notice
14732 * signal handler stack frames.
14733 */
14734- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14735+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14736
14737 if (err)
14738 return -EFAULT;
14739@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14740 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14741
14742 /* Set up to return from userspace. */
14743- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14744+ if (current->mm->context.vdso)
14745+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14746+ else
14747+ restorer = (void __user *)&frame->retcode;
14748 if (ka->sa.sa_flags & SA_RESTORER)
14749 restorer = ka->sa.sa_restorer;
14750 put_user_ex(restorer, &frame->pretcode);
14751@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14752 * reasons and because gdb uses it as a signature to notice
14753 * signal handler stack frames.
14754 */
14755- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14756+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14757 } put_user_catch(err);
14758
14759 if (err)
14760@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14761 int signr;
14762 sigset_t *oldset;
14763
14764+ pax_track_stack();
14765+
14766 /*
14767 * We want the common case to go fast, which is why we may in certain
14768 * cases get here from kernel mode. Just return without doing anything
14769@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14770 * X86_32: vm86 regs switched out by assembly code before reaching
14771 * here, so testing against kernel CS suffices.
14772 */
14773- if (!user_mode(regs))
14774+ if (!user_mode_novm(regs))
14775 return;
14776
14777 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14778diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14779--- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14780+++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14781@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14782 set_idle_for_cpu(cpu, c_idle.idle);
14783 do_rest:
14784 per_cpu(current_task, cpu) = c_idle.idle;
14785+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14786 #ifdef CONFIG_X86_32
14787 /* Stack for startup_32 can be just as for start_secondary onwards */
14788 irq_ctx_init(cpu);
14789 #else
14790 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14791 initial_gs = per_cpu_offset(cpu);
14792- per_cpu(kernel_stack, cpu) =
14793- (unsigned long)task_stack_page(c_idle.idle) -
14794- KERNEL_STACK_OFFSET + THREAD_SIZE;
14795+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14796 #endif
14797+
14798+ pax_open_kernel();
14799 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14800+ pax_close_kernel();
14801+
14802 initial_code = (unsigned long)start_secondary;
14803 stack_start = c_idle.idle->thread.sp;
14804
14805@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14806
14807 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14808
14809+#ifdef CONFIG_PAX_PER_CPU_PGD
14810+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14811+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14812+ KERNEL_PGD_PTRS);
14813+#endif
14814+
14815 err = do_boot_cpu(apicid, cpu);
14816 if (err) {
14817 pr_debug("do_boot_cpu failed %d\n", err);
14818diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14819--- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14820+++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14821@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14822 struct desc_struct *desc;
14823 unsigned long base;
14824
14825- seg &= ~7UL;
14826+ seg >>= 3;
14827
14828 mutex_lock(&child->mm->context.lock);
14829- if (unlikely((seg >> 3) >= child->mm->context.size))
14830+ if (unlikely(seg >= child->mm->context.size))
14831 addr = -1L; /* bogus selector, access would fault */
14832 else {
14833 desc = child->mm->context.ldt + seg;
14834@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14835 addr += base;
14836 }
14837 mutex_unlock(&child->mm->context.lock);
14838- }
14839+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14840+ addr = ktla_ktva(addr);
14841
14842 return addr;
14843 }
14844@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14845 unsigned char opcode[15];
14846 unsigned long addr = convert_ip_to_linear(child, regs);
14847
14848+ if (addr == -EINVAL)
14849+ return 0;
14850+
14851 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14852 for (i = 0; i < copied; i++) {
14853 switch (opcode[i]) {
14854@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14855
14856 #ifdef CONFIG_X86_64
14857 case 0x40 ... 0x4f:
14858- if (regs->cs != __USER_CS)
14859+ if ((regs->cs & 0xffff) != __USER_CS)
14860 /* 32-bit mode: register increment */
14861 return 0;
14862 /* 64-bit mode: REX prefix */
14863diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14864--- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14865+++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14866@@ -1,3 +1,4 @@
14867+.section .rodata,"a",@progbits
14868 ENTRY(sys_call_table)
14869 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14870 .long sys_exit
14871diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14872--- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14873+++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14874@@ -24,17 +24,224 @@
14875
14876 #include <asm/syscalls.h>
14877
14878-/*
14879- * Do a system call from kernel instead of calling sys_execve so we
14880- * end up with proper pt_regs.
14881- */
14882-int kernel_execve(const char *filename,
14883- const char *const argv[],
14884- const char *const envp[])
14885+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14886 {
14887- long __res;
14888- asm volatile ("int $0x80"
14889- : "=a" (__res)
14890- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14891- return __res;
14892+ unsigned long pax_task_size = TASK_SIZE;
14893+
14894+#ifdef CONFIG_PAX_SEGMEXEC
14895+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14896+ pax_task_size = SEGMEXEC_TASK_SIZE;
14897+#endif
14898+
14899+ if (len > pax_task_size || addr > pax_task_size - len)
14900+ return -EINVAL;
14901+
14902+ return 0;
14903+}
14904+
14905+unsigned long
14906+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14907+ unsigned long len, unsigned long pgoff, unsigned long flags)
14908+{
14909+ struct mm_struct *mm = current->mm;
14910+ struct vm_area_struct *vma;
14911+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14912+
14913+#ifdef CONFIG_PAX_SEGMEXEC
14914+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14915+ pax_task_size = SEGMEXEC_TASK_SIZE;
14916+#endif
14917+
14918+ pax_task_size -= PAGE_SIZE;
14919+
14920+ if (len > pax_task_size)
14921+ return -ENOMEM;
14922+
14923+ if (flags & MAP_FIXED)
14924+ return addr;
14925+
14926+#ifdef CONFIG_PAX_RANDMMAP
14927+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14928+#endif
14929+
14930+ if (addr) {
14931+ addr = PAGE_ALIGN(addr);
14932+ if (pax_task_size - len >= addr) {
14933+ vma = find_vma(mm, addr);
14934+ if (check_heap_stack_gap(vma, addr, len))
14935+ return addr;
14936+ }
14937+ }
14938+ if (len > mm->cached_hole_size) {
14939+ start_addr = addr = mm->free_area_cache;
14940+ } else {
14941+ start_addr = addr = mm->mmap_base;
14942+ mm->cached_hole_size = 0;
14943+ }
14944+
14945+#ifdef CONFIG_PAX_PAGEEXEC
14946+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14947+ start_addr = 0x00110000UL;
14948+
14949+#ifdef CONFIG_PAX_RANDMMAP
14950+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14951+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14952+#endif
14953+
14954+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14955+ start_addr = addr = mm->mmap_base;
14956+ else
14957+ addr = start_addr;
14958+ }
14959+#endif
14960+
14961+full_search:
14962+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14963+ /* At this point: (!vma || addr < vma->vm_end). */
14964+ if (pax_task_size - len < addr) {
14965+ /*
14966+ * Start a new search - just in case we missed
14967+ * some holes.
14968+ */
14969+ if (start_addr != mm->mmap_base) {
14970+ start_addr = addr = mm->mmap_base;
14971+ mm->cached_hole_size = 0;
14972+ goto full_search;
14973+ }
14974+ return -ENOMEM;
14975+ }
14976+ if (check_heap_stack_gap(vma, addr, len))
14977+ break;
14978+ if (addr + mm->cached_hole_size < vma->vm_start)
14979+ mm->cached_hole_size = vma->vm_start - addr;
14980+ addr = vma->vm_end;
14981+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14982+ start_addr = addr = mm->mmap_base;
14983+ mm->cached_hole_size = 0;
14984+ goto full_search;
14985+ }
14986+ }
14987+
14988+ /*
14989+ * Remember the place where we stopped the search:
14990+ */
14991+ mm->free_area_cache = addr + len;
14992+ return addr;
14993+}
14994+
14995+unsigned long
14996+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14997+ const unsigned long len, const unsigned long pgoff,
14998+ const unsigned long flags)
14999+{
15000+ struct vm_area_struct *vma;
15001+ struct mm_struct *mm = current->mm;
15002+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15003+
15004+#ifdef CONFIG_PAX_SEGMEXEC
15005+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15006+ pax_task_size = SEGMEXEC_TASK_SIZE;
15007+#endif
15008+
15009+ pax_task_size -= PAGE_SIZE;
15010+
15011+ /* requested length too big for entire address space */
15012+ if (len > pax_task_size)
15013+ return -ENOMEM;
15014+
15015+ if (flags & MAP_FIXED)
15016+ return addr;
15017+
15018+#ifdef CONFIG_PAX_PAGEEXEC
15019+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15020+ goto bottomup;
15021+#endif
15022+
15023+#ifdef CONFIG_PAX_RANDMMAP
15024+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15025+#endif
15026+
15027+ /* requesting a specific address */
15028+ if (addr) {
15029+ addr = PAGE_ALIGN(addr);
15030+ if (pax_task_size - len >= addr) {
15031+ vma = find_vma(mm, addr);
15032+ if (check_heap_stack_gap(vma, addr, len))
15033+ return addr;
15034+ }
15035+ }
15036+
15037+ /* check if free_area_cache is useful for us */
15038+ if (len <= mm->cached_hole_size) {
15039+ mm->cached_hole_size = 0;
15040+ mm->free_area_cache = mm->mmap_base;
15041+ }
15042+
15043+ /* either no address requested or can't fit in requested address hole */
15044+ addr = mm->free_area_cache;
15045+
15046+ /* make sure it can fit in the remaining address space */
15047+ if (addr > len) {
15048+ vma = find_vma(mm, addr-len);
15049+ if (check_heap_stack_gap(vma, addr - len, len))
15050+ /* remember the address as a hint for next time */
15051+ return (mm->free_area_cache = addr-len);
15052+ }
15053+
15054+ if (mm->mmap_base < len)
15055+ goto bottomup;
15056+
15057+ addr = mm->mmap_base-len;
15058+
15059+ do {
15060+ /*
15061+ * Lookup failure means no vma is above this address,
15062+ * else if new region fits below vma->vm_start,
15063+ * return with success:
15064+ */
15065+ vma = find_vma(mm, addr);
15066+ if (check_heap_stack_gap(vma, addr, len))
15067+ /* remember the address as a hint for next time */
15068+ return (mm->free_area_cache = addr);
15069+
15070+ /* remember the largest hole we saw so far */
15071+ if (addr + mm->cached_hole_size < vma->vm_start)
15072+ mm->cached_hole_size = vma->vm_start - addr;
15073+
15074+ /* try just below the current vma->vm_start */
15075+ addr = skip_heap_stack_gap(vma, len);
15076+ } while (!IS_ERR_VALUE(addr));
15077+
15078+bottomup:
15079+ /*
15080+ * A failed mmap() very likely causes application failure,
15081+ * so fall back to the bottom-up function here. This scenario
15082+ * can happen with large stack limits and large mmap()
15083+ * allocations.
15084+ */
15085+
15086+#ifdef CONFIG_PAX_SEGMEXEC
15087+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15088+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15089+ else
15090+#endif
15091+
15092+ mm->mmap_base = TASK_UNMAPPED_BASE;
15093+
15094+#ifdef CONFIG_PAX_RANDMMAP
15095+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15096+ mm->mmap_base += mm->delta_mmap;
15097+#endif
15098+
15099+ mm->free_area_cache = mm->mmap_base;
15100+ mm->cached_hole_size = ~0UL;
15101+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15102+ /*
15103+ * Restore the topdown base:
15104+ */
15105+ mm->mmap_base = base;
15106+ mm->free_area_cache = base;
15107+ mm->cached_hole_size = ~0UL;
15108+
15109+ return addr;
15110 }
15111diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15112--- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15113+++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15114@@ -32,8 +32,8 @@ out:
15115 return error;
15116 }
15117
15118-static void find_start_end(unsigned long flags, unsigned long *begin,
15119- unsigned long *end)
15120+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15121+ unsigned long *begin, unsigned long *end)
15122 {
15123 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15124 unsigned long new_begin;
15125@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15126 *begin = new_begin;
15127 }
15128 } else {
15129- *begin = TASK_UNMAPPED_BASE;
15130+ *begin = mm->mmap_base;
15131 *end = TASK_SIZE;
15132 }
15133 }
15134@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15135 if (flags & MAP_FIXED)
15136 return addr;
15137
15138- find_start_end(flags, &begin, &end);
15139+ find_start_end(mm, flags, &begin, &end);
15140
15141 if (len > end)
15142 return -ENOMEM;
15143
15144+#ifdef CONFIG_PAX_RANDMMAP
15145+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15146+#endif
15147+
15148 if (addr) {
15149 addr = PAGE_ALIGN(addr);
15150 vma = find_vma(mm, addr);
15151- if (end - len >= addr &&
15152- (!vma || addr + len <= vma->vm_start))
15153+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15154 return addr;
15155 }
15156 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15157@@ -106,7 +109,7 @@ full_search:
15158 }
15159 return -ENOMEM;
15160 }
15161- if (!vma || addr + len <= vma->vm_start) {
15162+ if (check_heap_stack_gap(vma, addr, len)) {
15163 /*
15164 * Remember the place where we stopped the search:
15165 */
15166@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15167 {
15168 struct vm_area_struct *vma;
15169 struct mm_struct *mm = current->mm;
15170- unsigned long addr = addr0;
15171+ unsigned long base = mm->mmap_base, addr = addr0;
15172
15173 /* requested length too big for entire address space */
15174 if (len > TASK_SIZE)
15175@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15176 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15177 goto bottomup;
15178
15179+#ifdef CONFIG_PAX_RANDMMAP
15180+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15181+#endif
15182+
15183 /* requesting a specific address */
15184 if (addr) {
15185 addr = PAGE_ALIGN(addr);
15186- vma = find_vma(mm, addr);
15187- if (TASK_SIZE - len >= addr &&
15188- (!vma || addr + len <= vma->vm_start))
15189- return addr;
15190+ if (TASK_SIZE - len >= addr) {
15191+ vma = find_vma(mm, addr);
15192+ if (check_heap_stack_gap(vma, addr, len))
15193+ return addr;
15194+ }
15195 }
15196
15197 /* check if free_area_cache is useful for us */
15198@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15199 /* make sure it can fit in the remaining address space */
15200 if (addr > len) {
15201 vma = find_vma(mm, addr-len);
15202- if (!vma || addr <= vma->vm_start)
15203+ if (check_heap_stack_gap(vma, addr - len, len))
15204 /* remember the address as a hint for next time */
15205 return mm->free_area_cache = addr-len;
15206 }
15207@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15208 * return with success:
15209 */
15210 vma = find_vma(mm, addr);
15211- if (!vma || addr+len <= vma->vm_start)
15212+ if (check_heap_stack_gap(vma, addr, len))
15213 /* remember the address as a hint for next time */
15214 return mm->free_area_cache = addr;
15215
15216@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15217 mm->cached_hole_size = vma->vm_start - addr;
15218
15219 /* try just below the current vma->vm_start */
15220- addr = vma->vm_start-len;
15221- } while (len < vma->vm_start);
15222+ addr = skip_heap_stack_gap(vma, len);
15223+ } while (!IS_ERR_VALUE(addr));
15224
15225 bottomup:
15226 /*
15227@@ -198,13 +206,21 @@ bottomup:
15228 * can happen with large stack limits and large mmap()
15229 * allocations.
15230 */
15231+ mm->mmap_base = TASK_UNMAPPED_BASE;
15232+
15233+#ifdef CONFIG_PAX_RANDMMAP
15234+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15235+ mm->mmap_base += mm->delta_mmap;
15236+#endif
15237+
15238+ mm->free_area_cache = mm->mmap_base;
15239 mm->cached_hole_size = ~0UL;
15240- mm->free_area_cache = TASK_UNMAPPED_BASE;
15241 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15242 /*
15243 * Restore the topdown base:
15244 */
15245- mm->free_area_cache = mm->mmap_base;
15246+ mm->mmap_base = base;
15247+ mm->free_area_cache = base;
15248 mm->cached_hole_size = ~0UL;
15249
15250 return addr;
15251diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15252--- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15253+++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15254@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15255
15256 void tboot_shutdown(u32 shutdown_type)
15257 {
15258- void (*shutdown)(void);
15259+ void (* __noreturn shutdown)(void);
15260
15261 if (!tboot_enabled())
15262 return;
15263@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15264
15265 switch_to_tboot_pt();
15266
15267- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15268+ shutdown = (void *)tboot->shutdown_entry;
15269 shutdown();
15270
15271 /* should not reach here */
15272@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15273 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15274 }
15275
15276-static atomic_t ap_wfs_count;
15277+static atomic_unchecked_t ap_wfs_count;
15278
15279 static int tboot_wait_for_aps(int num_aps)
15280 {
15281@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15282 {
15283 switch (action) {
15284 case CPU_DYING:
15285- atomic_inc(&ap_wfs_count);
15286+ atomic_inc_unchecked(&ap_wfs_count);
15287 if (num_online_cpus() == 1)
15288- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15289+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15290 return NOTIFY_BAD;
15291 break;
15292 }
15293@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15294
15295 tboot_create_trampoline();
15296
15297- atomic_set(&ap_wfs_count, 0);
15298+ atomic_set_unchecked(&ap_wfs_count, 0);
15299 register_hotcpu_notifier(&tboot_cpu_notifier);
15300 return 0;
15301 }
15302diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15303--- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15304+++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15305@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15306 {
15307 unsigned long pc = instruction_pointer(regs);
15308
15309- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15310+ if (!user_mode(regs) && in_lock_functions(pc)) {
15311 #ifdef CONFIG_FRAME_POINTER
15312- return *(unsigned long *)(regs->bp + sizeof(long));
15313+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15314 #else
15315 unsigned long *sp =
15316 (unsigned long *)kernel_stack_pointer(regs);
15317@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15318 * or above a saved flags. Eflags has bits 22-31 zero,
15319 * kernel addresses don't.
15320 */
15321+
15322+#ifdef CONFIG_PAX_KERNEXEC
15323+ return ktla_ktva(sp[0]);
15324+#else
15325 if (sp[0] >> 22)
15326 return sp[0];
15327 if (sp[1] >> 22)
15328 return sp[1];
15329 #endif
15330+
15331+#endif
15332 }
15333 return pc;
15334 }
15335diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15336--- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15337+++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15338@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15339 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15340 return -EINVAL;
15341
15342+#ifdef CONFIG_PAX_SEGMEXEC
15343+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15344+ return -EINVAL;
15345+#endif
15346+
15347 set_tls_desc(p, idx, &info, 1);
15348
15349 return 0;
15350diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15351--- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15352+++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15353@@ -32,6 +32,12 @@
15354 #include <asm/segment.h>
15355 #include <asm/page_types.h>
15356
15357+#ifdef CONFIG_PAX_KERNEXEC
15358+#define ta(X) (X)
15359+#else
15360+#define ta(X) ((X) - __PAGE_OFFSET)
15361+#endif
15362+
15363 #ifdef CONFIG_SMP
15364
15365 .section ".x86_trampoline","a"
15366@@ -62,7 +68,7 @@ r_base = .
15367 inc %ax # protected mode (PE) bit
15368 lmsw %ax # into protected mode
15369 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15370- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15371+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15372
15373 # These need to be in the same 64K segment as the above;
15374 # hence we don't use the boot_gdt_descr defined in head.S
15375diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15376--- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15377+++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15378@@ -90,7 +90,7 @@ startup_32:
15379 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15380 movl %eax, %ds
15381
15382- movl $X86_CR4_PAE, %eax
15383+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15384 movl %eax, %cr4 # Enable PAE mode
15385
15386 # Setup trampoline 4 level pagetables
15387@@ -138,7 +138,7 @@ tidt:
15388 # so the kernel can live anywhere
15389 .balign 4
15390 tgdt:
15391- .short tgdt_end - tgdt # gdt limit
15392+ .short tgdt_end - tgdt - 1 # gdt limit
15393 .long tgdt - r_base
15394 .short 0
15395 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15396diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15397--- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15398+++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15399@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15400
15401 /* Do we ignore FPU interrupts ? */
15402 char ignore_fpu_irq;
15403-
15404-/*
15405- * The IDT has to be page-aligned to simplify the Pentium
15406- * F0 0F bug workaround.
15407- */
15408-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15409 #endif
15410
15411 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15412@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15413 }
15414
15415 static void __kprobes
15416-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15417+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15418 long error_code, siginfo_t *info)
15419 {
15420 struct task_struct *tsk = current;
15421
15422 #ifdef CONFIG_X86_32
15423- if (regs->flags & X86_VM_MASK) {
15424+ if (v8086_mode(regs)) {
15425 /*
15426 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15427 * On nmi (interrupt 2), do_trap should not be called.
15428@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15429 }
15430 #endif
15431
15432- if (!user_mode(regs))
15433+ if (!user_mode_novm(regs))
15434 goto kernel_trap;
15435
15436 #ifdef CONFIG_X86_32
15437@@ -157,7 +151,7 @@ trap_signal:
15438 printk_ratelimit()) {
15439 printk(KERN_INFO
15440 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15441- tsk->comm, tsk->pid, str,
15442+ tsk->comm, task_pid_nr(tsk), str,
15443 regs->ip, regs->sp, error_code);
15444 print_vma_addr(" in ", regs->ip);
15445 printk("\n");
15446@@ -174,8 +168,20 @@ kernel_trap:
15447 if (!fixup_exception(regs)) {
15448 tsk->thread.error_code = error_code;
15449 tsk->thread.trap_no = trapnr;
15450+
15451+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15452+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15453+ str = "PAX: suspicious stack segment fault";
15454+#endif
15455+
15456 die(str, regs, error_code);
15457 }
15458+
15459+#ifdef CONFIG_PAX_REFCOUNT
15460+ if (trapnr == 4)
15461+ pax_report_refcount_overflow(regs);
15462+#endif
15463+
15464 return;
15465
15466 #ifdef CONFIG_X86_32
15467@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15468 conditional_sti(regs);
15469
15470 #ifdef CONFIG_X86_32
15471- if (regs->flags & X86_VM_MASK)
15472+ if (v8086_mode(regs))
15473 goto gp_in_vm86;
15474 #endif
15475
15476 tsk = current;
15477- if (!user_mode(regs))
15478+ if (!user_mode_novm(regs))
15479 goto gp_in_kernel;
15480
15481+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15482+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15483+ struct mm_struct *mm = tsk->mm;
15484+ unsigned long limit;
15485+
15486+ down_write(&mm->mmap_sem);
15487+ limit = mm->context.user_cs_limit;
15488+ if (limit < TASK_SIZE) {
15489+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15490+ up_write(&mm->mmap_sem);
15491+ return;
15492+ }
15493+ up_write(&mm->mmap_sem);
15494+ }
15495+#endif
15496+
15497 tsk->thread.error_code = error_code;
15498 tsk->thread.trap_no = 13;
15499
15500@@ -304,6 +326,13 @@ gp_in_kernel:
15501 if (notify_die(DIE_GPF, "general protection fault", regs,
15502 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15503 return;
15504+
15505+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15506+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15507+ die("PAX: suspicious general protection fault", regs, error_code);
15508+ else
15509+#endif
15510+
15511 die("general protection fault", regs, error_code);
15512 }
15513
15514@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15515 dotraplinkage notrace __kprobes void
15516 do_nmi(struct pt_regs *regs, long error_code)
15517 {
15518+
15519+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15520+ if (!user_mode(regs)) {
15521+ unsigned long cs = regs->cs & 0xFFFF;
15522+ unsigned long ip = ktva_ktla(regs->ip);
15523+
15524+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15525+ regs->ip = ip;
15526+ }
15527+#endif
15528+
15529 nmi_enter();
15530
15531 inc_irq_stat(__nmi_count);
15532@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15533 /* It's safe to allow irq's after DR6 has been saved */
15534 preempt_conditional_sti(regs);
15535
15536- if (regs->flags & X86_VM_MASK) {
15537+ if (v8086_mode(regs)) {
15538 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15539 error_code, 1);
15540 preempt_conditional_cli(regs);
15541@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15542 * We already checked v86 mode above, so we can check for kernel mode
15543 * by just checking the CPL of CS.
15544 */
15545- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15546+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15547 tsk->thread.debugreg6 &= ~DR_STEP;
15548 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15549 regs->flags &= ~X86_EFLAGS_TF;
15550@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15551 return;
15552 conditional_sti(regs);
15553
15554- if (!user_mode_vm(regs))
15555+ if (!user_mode(regs))
15556 {
15557 if (!fixup_exception(regs)) {
15558 task->thread.error_code = error_code;
15559@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15560 void __math_state_restore(void)
15561 {
15562 struct thread_info *thread = current_thread_info();
15563- struct task_struct *tsk = thread->task;
15564+ struct task_struct *tsk = current;
15565
15566 /*
15567 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15568@@ -750,8 +790,7 @@ void __math_state_restore(void)
15569 */
15570 asmlinkage void math_state_restore(void)
15571 {
15572- struct thread_info *thread = current_thread_info();
15573- struct task_struct *tsk = thread->task;
15574+ struct task_struct *tsk = current;
15575
15576 if (!tsk_used_math(tsk)) {
15577 local_irq_enable();
15578diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15579--- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15580+++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15581@@ -20,6 +20,7 @@
15582 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15583 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15584 * arch/x86/kernel/head_32.S: processor startup
15585+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15586 *
15587 * verify_cpu, returns the status of longmode and SSE in register %eax.
15588 * 0: Success 1: Failure
15589diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15590--- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15591+++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15592@@ -41,6 +41,7 @@
15593 #include <linux/ptrace.h>
15594 #include <linux/audit.h>
15595 #include <linux/stddef.h>
15596+#include <linux/grsecurity.h>
15597
15598 #include <asm/uaccess.h>
15599 #include <asm/io.h>
15600@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15601 do_exit(SIGSEGV);
15602 }
15603
15604- tss = &per_cpu(init_tss, get_cpu());
15605+ tss = init_tss + get_cpu();
15606 current->thread.sp0 = current->thread.saved_sp0;
15607 current->thread.sysenter_cs = __KERNEL_CS;
15608 load_sp0(tss, &current->thread);
15609@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15610 struct task_struct *tsk;
15611 int tmp, ret = -EPERM;
15612
15613+#ifdef CONFIG_GRKERNSEC_VM86
15614+ if (!capable(CAP_SYS_RAWIO)) {
15615+ gr_handle_vm86();
15616+ goto out;
15617+ }
15618+#endif
15619+
15620 tsk = current;
15621 if (tsk->thread.saved_sp0)
15622 goto out;
15623@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15624 int tmp, ret;
15625 struct vm86plus_struct __user *v86;
15626
15627+#ifdef CONFIG_GRKERNSEC_VM86
15628+ if (!capable(CAP_SYS_RAWIO)) {
15629+ gr_handle_vm86();
15630+ ret = -EPERM;
15631+ goto out;
15632+ }
15633+#endif
15634+
15635 tsk = current;
15636 switch (cmd) {
15637 case VM86_REQUEST_IRQ:
15638@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15639 tsk->thread.saved_fs = info->regs32->fs;
15640 tsk->thread.saved_gs = get_user_gs(info->regs32);
15641
15642- tss = &per_cpu(init_tss, get_cpu());
15643+ tss = init_tss + get_cpu();
15644 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15645 if (cpu_has_sep)
15646 tsk->thread.sysenter_cs = 0;
15647@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15648 goto cannot_handle;
15649 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15650 goto cannot_handle;
15651- intr_ptr = (unsigned long __user *) (i << 2);
15652+ intr_ptr = (__force unsigned long __user *) (i << 2);
15653 if (get_user(segoffs, intr_ptr))
15654 goto cannot_handle;
15655 if ((segoffs >> 16) == BIOSSEG)
15656diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15657--- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15658+++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15659@@ -26,6 +26,13 @@
15660 #include <asm/page_types.h>
15661 #include <asm/cache.h>
15662 #include <asm/boot.h>
15663+#include <asm/segment.h>
15664+
15665+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15666+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15667+#else
15668+#define __KERNEL_TEXT_OFFSET 0
15669+#endif
15670
15671 #undef i386 /* in case the preprocessor is a 32bit one */
15672
15673@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15674
15675 PHDRS {
15676 text PT_LOAD FLAGS(5); /* R_E */
15677+#ifdef CONFIG_X86_32
15678+ module PT_LOAD FLAGS(5); /* R_E */
15679+#endif
15680+#ifdef CONFIG_XEN
15681+ rodata PT_LOAD FLAGS(5); /* R_E */
15682+#else
15683+ rodata PT_LOAD FLAGS(4); /* R__ */
15684+#endif
15685 data PT_LOAD FLAGS(6); /* RW_ */
15686 #ifdef CONFIG_X86_64
15687 user PT_LOAD FLAGS(5); /* R_E */
15688+#endif
15689+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15690 #ifdef CONFIG_SMP
15691 percpu PT_LOAD FLAGS(6); /* RW_ */
15692 #endif
15693+ text.init PT_LOAD FLAGS(5); /* R_E */
15694+ text.exit PT_LOAD FLAGS(5); /* R_E */
15695 init PT_LOAD FLAGS(7); /* RWE */
15696-#endif
15697 note PT_NOTE FLAGS(0); /* ___ */
15698 }
15699
15700 SECTIONS
15701 {
15702 #ifdef CONFIG_X86_32
15703- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15704- phys_startup_32 = startup_32 - LOAD_OFFSET;
15705+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15706 #else
15707- . = __START_KERNEL;
15708- phys_startup_64 = startup_64 - LOAD_OFFSET;
15709+ . = __START_KERNEL;
15710 #endif
15711
15712 /* Text and read-only data */
15713- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15714- _text = .;
15715+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15716 /* bootstrapping code */
15717+#ifdef CONFIG_X86_32
15718+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15719+#else
15720+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15721+#endif
15722+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15723+ _text = .;
15724 HEAD_TEXT
15725 #ifdef CONFIG_X86_32
15726 . = ALIGN(PAGE_SIZE);
15727@@ -109,13 +131,47 @@ SECTIONS
15728 IRQENTRY_TEXT
15729 *(.fixup)
15730 *(.gnu.warning)
15731- /* End of text section */
15732- _etext = .;
15733 } :text = 0x9090
15734
15735- NOTES :text :note
15736+ . += __KERNEL_TEXT_OFFSET;
15737+
15738+#ifdef CONFIG_X86_32
15739+ . = ALIGN(PAGE_SIZE);
15740+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15741+
15742+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15743+ MODULES_EXEC_VADDR = .;
15744+ BYTE(0)
15745+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15746+ . = ALIGN(HPAGE_SIZE);
15747+ MODULES_EXEC_END = . - 1;
15748+#endif
15749+
15750+ } :module
15751+#endif
15752+
15753+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15754+ /* End of text section */
15755+ _etext = . - __KERNEL_TEXT_OFFSET;
15756+ }
15757+
15758+#ifdef CONFIG_X86_32
15759+ . = ALIGN(PAGE_SIZE);
15760+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15761+ *(.idt)
15762+ . = ALIGN(PAGE_SIZE);
15763+ *(.empty_zero_page)
15764+ *(.initial_pg_fixmap)
15765+ *(.initial_pg_pmd)
15766+ *(.initial_page_table)
15767+ *(.swapper_pg_dir)
15768+ } :rodata
15769+#endif
15770+
15771+ . = ALIGN(PAGE_SIZE);
15772+ NOTES :rodata :note
15773
15774- EXCEPTION_TABLE(16) :text = 0x9090
15775+ EXCEPTION_TABLE(16) :rodata
15776
15777 #if defined(CONFIG_DEBUG_RODATA)
15778 /* .text should occupy whole number of pages */
15779@@ -127,16 +183,20 @@ SECTIONS
15780
15781 /* Data */
15782 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15783+
15784+#ifdef CONFIG_PAX_KERNEXEC
15785+ . = ALIGN(HPAGE_SIZE);
15786+#else
15787+ . = ALIGN(PAGE_SIZE);
15788+#endif
15789+
15790 /* Start of data section */
15791 _sdata = .;
15792
15793 /* init_task */
15794 INIT_TASK_DATA(THREAD_SIZE)
15795
15796-#ifdef CONFIG_X86_32
15797- /* 32 bit has nosave before _edata */
15798 NOSAVE_DATA
15799-#endif
15800
15801 PAGE_ALIGNED_DATA(PAGE_SIZE)
15802
15803@@ -208,12 +268,19 @@ SECTIONS
15804 #endif /* CONFIG_X86_64 */
15805
15806 /* Init code and data - will be freed after init */
15807- . = ALIGN(PAGE_SIZE);
15808 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15809+ BYTE(0)
15810+
15811+#ifdef CONFIG_PAX_KERNEXEC
15812+ . = ALIGN(HPAGE_SIZE);
15813+#else
15814+ . = ALIGN(PAGE_SIZE);
15815+#endif
15816+
15817 __init_begin = .; /* paired with __init_end */
15818- }
15819+ } :init.begin
15820
15821-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15822+#ifdef CONFIG_SMP
15823 /*
15824 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15825 * output PHDR, so the next output section - .init.text - should
15826@@ -222,12 +289,27 @@ SECTIONS
15827 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15828 #endif
15829
15830- INIT_TEXT_SECTION(PAGE_SIZE)
15831-#ifdef CONFIG_X86_64
15832- :init
15833-#endif
15834+ . = ALIGN(PAGE_SIZE);
15835+ init_begin = .;
15836+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15837+ VMLINUX_SYMBOL(_sinittext) = .;
15838+ INIT_TEXT
15839+ VMLINUX_SYMBOL(_einittext) = .;
15840+ . = ALIGN(PAGE_SIZE);
15841+ } :text.init
15842
15843- INIT_DATA_SECTION(16)
15844+ /*
15845+ * .exit.text is discard at runtime, not link time, to deal with
15846+ * references from .altinstructions and .eh_frame
15847+ */
15848+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15849+ EXIT_TEXT
15850+ . = ALIGN(16);
15851+ } :text.exit
15852+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15853+
15854+ . = ALIGN(PAGE_SIZE);
15855+ INIT_DATA_SECTION(16) :init
15856
15857 /*
15858 * Code and data for a variety of lowlevel trampolines, to be
15859@@ -301,19 +383,12 @@ SECTIONS
15860 }
15861
15862 . = ALIGN(8);
15863- /*
15864- * .exit.text is discard at runtime, not link time, to deal with
15865- * references from .altinstructions and .eh_frame
15866- */
15867- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15868- EXIT_TEXT
15869- }
15870
15871 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15872 EXIT_DATA
15873 }
15874
15875-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15876+#ifndef CONFIG_SMP
15877 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15878 #endif
15879
15880@@ -332,16 +407,10 @@ SECTIONS
15881 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15882 __smp_locks = .;
15883 *(.smp_locks)
15884- . = ALIGN(PAGE_SIZE);
15885 __smp_locks_end = .;
15886+ . = ALIGN(PAGE_SIZE);
15887 }
15888
15889-#ifdef CONFIG_X86_64
15890- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15891- NOSAVE_DATA
15892- }
15893-#endif
15894-
15895 /* BSS */
15896 . = ALIGN(PAGE_SIZE);
15897 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15898@@ -357,6 +426,7 @@ SECTIONS
15899 __brk_base = .;
15900 . += 64 * 1024; /* 64k alignment slop space */
15901 *(.brk_reservation) /* areas brk users have reserved */
15902+ . = ALIGN(HPAGE_SIZE);
15903 __brk_limit = .;
15904 }
15905
15906@@ -383,13 +453,12 @@ SECTIONS
15907 * for the boot processor.
15908 */
15909 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15910-INIT_PER_CPU(gdt_page);
15911 INIT_PER_CPU(irq_stack_union);
15912
15913 /*
15914 * Build-time check on the image size:
15915 */
15916-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15917+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15918 "kernel image bigger than KERNEL_IMAGE_SIZE");
15919
15920 #ifdef CONFIG_SMP
15921diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15922--- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15923+++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15924@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15925 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15926 {
15927 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15928- .sysctl_enabled = 1,
15929+ .sysctl_enabled = 0,
15930 };
15931
15932 void update_vsyscall_tz(void)
15933@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15934 static ctl_table kernel_table2[] = {
15935 { .procname = "vsyscall64",
15936 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15937- .mode = 0644,
15938+ .mode = 0444,
15939 .proc_handler = proc_dointvec },
15940 {}
15941 };
15942diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15943--- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15944+++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15945@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15946 EXPORT_SYMBOL(copy_user_generic_string);
15947 EXPORT_SYMBOL(copy_user_generic_unrolled);
15948 EXPORT_SYMBOL(__copy_user_nocache);
15949-EXPORT_SYMBOL(_copy_from_user);
15950-EXPORT_SYMBOL(_copy_to_user);
15951
15952 EXPORT_SYMBOL(copy_page);
15953 EXPORT_SYMBOL(clear_page);
15954diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15955--- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15956+++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15957@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15958 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15959 return -EINVAL;
15960
15961- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15962+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15963 fx_sw_user->extended_size -
15964 FP_XSTATE_MAGIC2_SIZE));
15965 if (err)
15966@@ -267,7 +267,7 @@ fx_only:
15967 * the other extended state.
15968 */
15969 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15970- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15971+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15972 }
15973
15974 /*
15975@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15976 if (use_xsave())
15977 err = restore_user_xstate(buf);
15978 else
15979- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15980+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15981 buf);
15982 if (unlikely(err)) {
15983 /*
15984diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15985--- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15986+++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15987@@ -96,7 +96,7 @@
15988 #define Src2ImmByte (2<<29)
15989 #define Src2One (3<<29)
15990 #define Src2Imm (4<<29)
15991-#define Src2Mask (7<<29)
15992+#define Src2Mask (7U<<29)
15993
15994 #define X2(x...) x, x
15995 #define X3(x...) X2(x), x
15996@@ -207,6 +207,7 @@ struct gprefix {
15997
15998 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15999 do { \
16000+ unsigned long _tmp; \
16001 __asm__ __volatile__ ( \
16002 _PRE_EFLAGS("0", "4", "2") \
16003 _op _suffix " %"_x"3,%1; " \
16004@@ -220,8 +221,6 @@ struct gprefix {
16005 /* Raw emulation: instruction has two explicit operands. */
16006 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16007 do { \
16008- unsigned long _tmp; \
16009- \
16010 switch ((_dst).bytes) { \
16011 case 2: \
16012 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16013@@ -237,7 +236,6 @@ struct gprefix {
16014
16015 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16016 do { \
16017- unsigned long _tmp; \
16018 switch ((_dst).bytes) { \
16019 case 1: \
16020 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16021diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
16022--- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16023+++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16024@@ -53,7 +53,7 @@
16025 #define APIC_BUS_CYCLE_NS 1
16026
16027 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16028-#define apic_debug(fmt, arg...)
16029+#define apic_debug(fmt, arg...) do {} while (0)
16030
16031 #define APIC_LVT_NUM 6
16032 /* 14 is the version for Xeon and Pentium 8.4.8*/
16033diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
16034--- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16035+++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16036@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16037
16038 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16039
16040- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16041+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16042
16043 /*
16044 * Assume that the pte write on a page table of the same type
16045@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16046 }
16047
16048 spin_lock(&vcpu->kvm->mmu_lock);
16049- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16050+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16051 gentry = 0;
16052 kvm_mmu_free_some_pages(vcpu);
16053 ++vcpu->kvm->stat.mmu_pte_write;
16054diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
16055--- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16056+++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
16057@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16058 unsigned long mmu_seq;
16059 bool map_writable;
16060
16061+ pax_track_stack();
16062+
16063 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16064
16065 r = mmu_topup_memory_caches(vcpu);
16066@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16067 if (need_flush)
16068 kvm_flush_remote_tlbs(vcpu->kvm);
16069
16070- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16071+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16072
16073 spin_unlock(&vcpu->kvm->mmu_lock);
16074
16075diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
16076--- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16077+++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16078@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16079 int cpu = raw_smp_processor_id();
16080
16081 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16082+
16083+ pax_open_kernel();
16084 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16085+ pax_close_kernel();
16086+
16087 load_TR_desc();
16088 }
16089
16090@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16091 #endif
16092 #endif
16093
16094+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16095+ __set_fs(current_thread_info()->addr_limit);
16096+#endif
16097+
16098 reload_tss(vcpu);
16099
16100 local_irq_disable();
16101diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16102--- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16103+++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16104@@ -797,7 +797,11 @@ static void reload_tss(void)
16105 struct desc_struct *descs;
16106
16107 descs = (void *)gdt->address;
16108+
16109+ pax_open_kernel();
16110 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16111+ pax_close_kernel();
16112+
16113 load_TR_desc();
16114 }
16115
16116@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16117 if (!cpu_has_vmx_flexpriority())
16118 flexpriority_enabled = 0;
16119
16120- if (!cpu_has_vmx_tpr_shadow())
16121- kvm_x86_ops->update_cr8_intercept = NULL;
16122+ if (!cpu_has_vmx_tpr_shadow()) {
16123+ pax_open_kernel();
16124+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16125+ pax_close_kernel();
16126+ }
16127
16128 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16129 kvm_disable_largepages();
16130@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16131 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16132
16133 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16134- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16135+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16136 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16137 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16138 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16139@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16140 "jmp .Lkvm_vmx_return \n\t"
16141 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16142 ".Lkvm_vmx_return: "
16143+
16144+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16145+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16146+ ".Lkvm_vmx_return2: "
16147+#endif
16148+
16149 /* Save guest registers, load host registers, keep flags */
16150 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16151 "pop %0 \n\t"
16152@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16153 #endif
16154 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16155 [wordsize]"i"(sizeof(ulong))
16156+
16157+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16158+ ,[cs]"i"(__KERNEL_CS)
16159+#endif
16160+
16161 : "cc", "memory"
16162 , R"ax", R"bx", R"di", R"si"
16163 #ifdef CONFIG_X86_64
16164@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16165
16166 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16167
16168- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16169+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16170+
16171+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16172+ loadsegment(fs, __KERNEL_PERCPU);
16173+#endif
16174+
16175+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16176+ __set_fs(current_thread_info()->addr_limit);
16177+#endif
16178+
16179 vmx->launched = 1;
16180
16181 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16182diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16183--- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16184+++ linux-3.0.4/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16185@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16186 if (n < msr_list.nmsrs)
16187 goto out;
16188 r = -EFAULT;
16189+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16190+ goto out;
16191 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16192 num_msrs_to_save * sizeof(u32)))
16193 goto out;
16194@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16195 struct kvm_cpuid2 *cpuid,
16196 struct kvm_cpuid_entry2 __user *entries)
16197 {
16198- int r;
16199+ int r, i;
16200
16201 r = -E2BIG;
16202 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16203 goto out;
16204 r = -EFAULT;
16205- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16206- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16207+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16208 goto out;
16209+ for (i = 0; i < cpuid->nent; ++i) {
16210+ struct kvm_cpuid_entry2 cpuid_entry;
16211+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16212+ goto out;
16213+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16214+ }
16215 vcpu->arch.cpuid_nent = cpuid->nent;
16216 kvm_apic_set_version(vcpu);
16217 kvm_x86_ops->cpuid_update(vcpu);
16218@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16219 struct kvm_cpuid2 *cpuid,
16220 struct kvm_cpuid_entry2 __user *entries)
16221 {
16222- int r;
16223+ int r, i;
16224
16225 r = -E2BIG;
16226 if (cpuid->nent < vcpu->arch.cpuid_nent)
16227 goto out;
16228 r = -EFAULT;
16229- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16230- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16231+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16232 goto out;
16233+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16234+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16235+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16236+ goto out;
16237+ }
16238 return 0;
16239
16240 out:
16241@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16242 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16243 struct kvm_interrupt *irq)
16244 {
16245- if (irq->irq < 0 || irq->irq >= 256)
16246+ if (irq->irq >= 256)
16247 return -EINVAL;
16248 if (irqchip_in_kernel(vcpu->kvm))
16249 return -ENXIO;
16250@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16251 }
16252 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16253
16254-int kvm_arch_init(void *opaque)
16255+int kvm_arch_init(const void *opaque)
16256 {
16257 int r;
16258 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16259diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16260--- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16261+++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16262@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16263 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16264 * Launcher to reboot us.
16265 */
16266-static void lguest_restart(char *reason)
16267+static __noreturn void lguest_restart(char *reason)
16268 {
16269 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16270+ BUG();
16271 }
16272
16273 /*G:050
16274diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16275--- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16276+++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16277@@ -8,18 +8,30 @@
16278
16279 long long atomic64_read_cx8(long long, const atomic64_t *v);
16280 EXPORT_SYMBOL(atomic64_read_cx8);
16281+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16282+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16283 long long atomic64_set_cx8(long long, const atomic64_t *v);
16284 EXPORT_SYMBOL(atomic64_set_cx8);
16285+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16286+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16287 long long atomic64_xchg_cx8(long long, unsigned high);
16288 EXPORT_SYMBOL(atomic64_xchg_cx8);
16289 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16290 EXPORT_SYMBOL(atomic64_add_return_cx8);
16291+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16292+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16293 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16294 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16295+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16296+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16297 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16298 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16299+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16300+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16301 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16302 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16303+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16304+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16305 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16307 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16308@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16309 #ifndef CONFIG_X86_CMPXCHG64
16310 long long atomic64_read_386(long long, const atomic64_t *v);
16311 EXPORT_SYMBOL(atomic64_read_386);
16312+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16313+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16314 long long atomic64_set_386(long long, const atomic64_t *v);
16315 EXPORT_SYMBOL(atomic64_set_386);
16316+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16317+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16318 long long atomic64_xchg_386(long long, unsigned high);
16319 EXPORT_SYMBOL(atomic64_xchg_386);
16320 long long atomic64_add_return_386(long long a, atomic64_t *v);
16321 EXPORT_SYMBOL(atomic64_add_return_386);
16322+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16323+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16324 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16325 EXPORT_SYMBOL(atomic64_sub_return_386);
16326+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16327+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16328 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16329 EXPORT_SYMBOL(atomic64_inc_return_386);
16330+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16331+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16332 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16333 EXPORT_SYMBOL(atomic64_dec_return_386);
16334+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16335+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16336 long long atomic64_add_386(long long a, atomic64_t *v);
16337 EXPORT_SYMBOL(atomic64_add_386);
16338+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16339+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16340 long long atomic64_sub_386(long long a, atomic64_t *v);
16341 EXPORT_SYMBOL(atomic64_sub_386);
16342+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16343+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16344 long long atomic64_inc_386(long long a, atomic64_t *v);
16345 EXPORT_SYMBOL(atomic64_inc_386);
16346+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16347+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16348 long long atomic64_dec_386(long long a, atomic64_t *v);
16349 EXPORT_SYMBOL(atomic64_dec_386);
16350+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16351+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16352 long long atomic64_dec_if_positive_386(atomic64_t *v);
16353 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16354 int atomic64_inc_not_zero_386(atomic64_t *v);
16355diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16356--- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16357+++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16358@@ -48,6 +48,10 @@ BEGIN(read)
16359 movl (v), %eax
16360 movl 4(v), %edx
16361 RET_ENDP
16362+BEGIN(read_unchecked)
16363+ movl (v), %eax
16364+ movl 4(v), %edx
16365+RET_ENDP
16366 #undef v
16367
16368 #define v %esi
16369@@ -55,6 +59,10 @@ BEGIN(set)
16370 movl %ebx, (v)
16371 movl %ecx, 4(v)
16372 RET_ENDP
16373+BEGIN(set_unchecked)
16374+ movl %ebx, (v)
16375+ movl %ecx, 4(v)
16376+RET_ENDP
16377 #undef v
16378
16379 #define v %esi
16380@@ -70,6 +78,20 @@ RET_ENDP
16381 BEGIN(add)
16382 addl %eax, (v)
16383 adcl %edx, 4(v)
16384+
16385+#ifdef CONFIG_PAX_REFCOUNT
16386+ jno 0f
16387+ subl %eax, (v)
16388+ sbbl %edx, 4(v)
16389+ int $4
16390+0:
16391+ _ASM_EXTABLE(0b, 0b)
16392+#endif
16393+
16394+RET_ENDP
16395+BEGIN(add_unchecked)
16396+ addl %eax, (v)
16397+ adcl %edx, 4(v)
16398 RET_ENDP
16399 #undef v
16400
16401@@ -77,6 +99,24 @@ RET_ENDP
16402 BEGIN(add_return)
16403 addl (v), %eax
16404 adcl 4(v), %edx
16405+
16406+#ifdef CONFIG_PAX_REFCOUNT
16407+ into
16408+1234:
16409+ _ASM_EXTABLE(1234b, 2f)
16410+#endif
16411+
16412+ movl %eax, (v)
16413+ movl %edx, 4(v)
16414+
16415+#ifdef CONFIG_PAX_REFCOUNT
16416+2:
16417+#endif
16418+
16419+RET_ENDP
16420+BEGIN(add_return_unchecked)
16421+ addl (v), %eax
16422+ adcl 4(v), %edx
16423 movl %eax, (v)
16424 movl %edx, 4(v)
16425 RET_ENDP
16426@@ -86,6 +126,20 @@ RET_ENDP
16427 BEGIN(sub)
16428 subl %eax, (v)
16429 sbbl %edx, 4(v)
16430+
16431+#ifdef CONFIG_PAX_REFCOUNT
16432+ jno 0f
16433+ addl %eax, (v)
16434+ adcl %edx, 4(v)
16435+ int $4
16436+0:
16437+ _ASM_EXTABLE(0b, 0b)
16438+#endif
16439+
16440+RET_ENDP
16441+BEGIN(sub_unchecked)
16442+ subl %eax, (v)
16443+ sbbl %edx, 4(v)
16444 RET_ENDP
16445 #undef v
16446
16447@@ -96,6 +150,27 @@ BEGIN(sub_return)
16448 sbbl $0, %edx
16449 addl (v), %eax
16450 adcl 4(v), %edx
16451+
16452+#ifdef CONFIG_PAX_REFCOUNT
16453+ into
16454+1234:
16455+ _ASM_EXTABLE(1234b, 2f)
16456+#endif
16457+
16458+ movl %eax, (v)
16459+ movl %edx, 4(v)
16460+
16461+#ifdef CONFIG_PAX_REFCOUNT
16462+2:
16463+#endif
16464+
16465+RET_ENDP
16466+BEGIN(sub_return_unchecked)
16467+ negl %edx
16468+ negl %eax
16469+ sbbl $0, %edx
16470+ addl (v), %eax
16471+ adcl 4(v), %edx
16472 movl %eax, (v)
16473 movl %edx, 4(v)
16474 RET_ENDP
16475@@ -105,6 +180,20 @@ RET_ENDP
16476 BEGIN(inc)
16477 addl $1, (v)
16478 adcl $0, 4(v)
16479+
16480+#ifdef CONFIG_PAX_REFCOUNT
16481+ jno 0f
16482+ subl $1, (v)
16483+ sbbl $0, 4(v)
16484+ int $4
16485+0:
16486+ _ASM_EXTABLE(0b, 0b)
16487+#endif
16488+
16489+RET_ENDP
16490+BEGIN(inc_unchecked)
16491+ addl $1, (v)
16492+ adcl $0, 4(v)
16493 RET_ENDP
16494 #undef v
16495
16496@@ -114,6 +203,26 @@ BEGIN(inc_return)
16497 movl 4(v), %edx
16498 addl $1, %eax
16499 adcl $0, %edx
16500+
16501+#ifdef CONFIG_PAX_REFCOUNT
16502+ into
16503+1234:
16504+ _ASM_EXTABLE(1234b, 2f)
16505+#endif
16506+
16507+ movl %eax, (v)
16508+ movl %edx, 4(v)
16509+
16510+#ifdef CONFIG_PAX_REFCOUNT
16511+2:
16512+#endif
16513+
16514+RET_ENDP
16515+BEGIN(inc_return_unchecked)
16516+ movl (v), %eax
16517+ movl 4(v), %edx
16518+ addl $1, %eax
16519+ adcl $0, %edx
16520 movl %eax, (v)
16521 movl %edx, 4(v)
16522 RET_ENDP
16523@@ -123,6 +232,20 @@ RET_ENDP
16524 BEGIN(dec)
16525 subl $1, (v)
16526 sbbl $0, 4(v)
16527+
16528+#ifdef CONFIG_PAX_REFCOUNT
16529+ jno 0f
16530+ addl $1, (v)
16531+ adcl $0, 4(v)
16532+ int $4
16533+0:
16534+ _ASM_EXTABLE(0b, 0b)
16535+#endif
16536+
16537+RET_ENDP
16538+BEGIN(dec_unchecked)
16539+ subl $1, (v)
16540+ sbbl $0, 4(v)
16541 RET_ENDP
16542 #undef v
16543
16544@@ -132,6 +255,26 @@ BEGIN(dec_return)
16545 movl 4(v), %edx
16546 subl $1, %eax
16547 sbbl $0, %edx
16548+
16549+#ifdef CONFIG_PAX_REFCOUNT
16550+ into
16551+1234:
16552+ _ASM_EXTABLE(1234b, 2f)
16553+#endif
16554+
16555+ movl %eax, (v)
16556+ movl %edx, 4(v)
16557+
16558+#ifdef CONFIG_PAX_REFCOUNT
16559+2:
16560+#endif
16561+
16562+RET_ENDP
16563+BEGIN(dec_return_unchecked)
16564+ movl (v), %eax
16565+ movl 4(v), %edx
16566+ subl $1, %eax
16567+ sbbl $0, %edx
16568 movl %eax, (v)
16569 movl %edx, 4(v)
16570 RET_ENDP
16571@@ -143,6 +286,13 @@ BEGIN(add_unless)
16572 adcl %edx, %edi
16573 addl (v), %eax
16574 adcl 4(v), %edx
16575+
16576+#ifdef CONFIG_PAX_REFCOUNT
16577+ into
16578+1234:
16579+ _ASM_EXTABLE(1234b, 2f)
16580+#endif
16581+
16582 cmpl %eax, %esi
16583 je 3f
16584 1:
16585@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16586 1:
16587 addl $1, %eax
16588 adcl $0, %edx
16589+
16590+#ifdef CONFIG_PAX_REFCOUNT
16591+ into
16592+1234:
16593+ _ASM_EXTABLE(1234b, 2f)
16594+#endif
16595+
16596 movl %eax, (v)
16597 movl %edx, 4(v)
16598 movl $1, %eax
16599@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16600 movl 4(v), %edx
16601 subl $1, %eax
16602 sbbl $0, %edx
16603+
16604+#ifdef CONFIG_PAX_REFCOUNT
16605+ into
16606+1234:
16607+ _ASM_EXTABLE(1234b, 1f)
16608+#endif
16609+
16610 js 1f
16611 movl %eax, (v)
16612 movl %edx, 4(v)
16613diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16614--- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16615+++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-09-17 18:31:51.000000000 -0400
16616@@ -35,10 +35,24 @@ ENTRY(atomic64_read_cx8)
16617 CFI_STARTPROC
16618
16619 read64 %ecx
16620+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16621+ orb $0x80, 0x7(%rsp)
16622+#endif
16623 ret
16624 CFI_ENDPROC
16625 ENDPROC(atomic64_read_cx8)
16626
16627+ENTRY(atomic64_read_unchecked_cx8)
16628+ CFI_STARTPROC
16629+
16630+ read64 %ecx
16631+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16632+ orb $0x80, 0x7(%rsp)
16633+#endif
16634+ ret
16635+ CFI_ENDPROC
16636+ENDPROC(atomic64_read_unchecked_cx8)
16637+
16638 ENTRY(atomic64_set_cx8)
16639 CFI_STARTPROC
16640
16641@@ -48,10 +62,29 @@ ENTRY(atomic64_set_cx8)
16642 cmpxchg8b (%esi)
16643 jne 1b
16644
16645+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16646+ orb $0x80, 0x7(%rsp)
16647+#endif
16648 ret
16649 CFI_ENDPROC
16650 ENDPROC(atomic64_set_cx8)
16651
16652+ENTRY(atomic64_set_unchecked_cx8)
16653+ CFI_STARTPROC
16654+
16655+1:
16656+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16657+ * are atomic on 586 and newer */
16658+ cmpxchg8b (%esi)
16659+ jne 1b
16660+
16661+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16662+ orb $0x80, 0x7(%rsp)
16663+#endif
16664+ ret
16665+ CFI_ENDPROC
16666+ENDPROC(atomic64_set_unchecked_cx8)
16667+
16668 ENTRY(atomic64_xchg_cx8)
16669 CFI_STARTPROC
16670
16671@@ -62,12 +95,15 @@ ENTRY(atomic64_xchg_cx8)
16672 cmpxchg8b (%esi)
16673 jne 1b
16674
16675+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16676+ orb $0x80, 0x7(%rsp)
16677+#endif
16678 ret
16679 CFI_ENDPROC
16680 ENDPROC(atomic64_xchg_cx8)
16681
16682-.macro addsub_return func ins insc
16683-ENTRY(atomic64_\func\()_return_cx8)
16684+.macro addsub_return func ins insc unchecked=""
16685+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16686 CFI_STARTPROC
16687 SAVE ebp
16688 SAVE ebx
16689@@ -84,27 +120,46 @@ ENTRY(atomic64_\func\()_return_cx8)
16690 movl %edx, %ecx
16691 \ins\()l %esi, %ebx
16692 \insc\()l %edi, %ecx
16693+
16694+.ifb \unchecked
16695+#ifdef CONFIG_PAX_REFCOUNT
16696+ into
16697+2:
16698+ _ASM_EXTABLE(2b, 3f)
16699+#endif
16700+.endif
16701+
16702 LOCK_PREFIX
16703 cmpxchg8b (%ebp)
16704 jne 1b
16705-
16706-10:
16707 movl %ebx, %eax
16708 movl %ecx, %edx
16709+
16710+.ifb \unchecked
16711+#ifdef CONFIG_PAX_REFCOUNT
16712+3:
16713+#endif
16714+.endif
16715+
16716 RESTORE edi
16717 RESTORE esi
16718 RESTORE ebx
16719 RESTORE ebp
16720+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16721+ orb $0x80, 0x7(%rsp)
16722+#endif
16723 ret
16724 CFI_ENDPROC
16725-ENDPROC(atomic64_\func\()_return_cx8)
16726+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16727 .endm
16728
16729 addsub_return add add adc
16730 addsub_return sub sub sbb
16731+addsub_return add add adc _unchecked
16732+addsub_return sub sub sbb _unchecked
16733
16734-.macro incdec_return func ins insc
16735-ENTRY(atomic64_\func\()_return_cx8)
16736+.macro incdec_return func ins insc unchecked
16737+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16738 CFI_STARTPROC
16739 SAVE ebx
16740
16741@@ -114,21 +169,41 @@ ENTRY(atomic64_\func\()_return_cx8)
16742 movl %edx, %ecx
16743 \ins\()l $1, %ebx
16744 \insc\()l $0, %ecx
16745+
16746+.ifb \unchecked
16747+#ifdef CONFIG_PAX_REFCOUNT
16748+ into
16749+2:
16750+ _ASM_EXTABLE(2b, 3f)
16751+#endif
16752+.endif
16753+
16754 LOCK_PREFIX
16755 cmpxchg8b (%esi)
16756 jne 1b
16757
16758-10:
16759 movl %ebx, %eax
16760 movl %ecx, %edx
16761+
16762+.ifb \unchecked
16763+#ifdef CONFIG_PAX_REFCOUNT
16764+3:
16765+#endif
16766+.endif
16767+
16768 RESTORE ebx
16769+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16770+ orb $0x80, 0x7(%rsp)
16771+#endif
16772 ret
16773 CFI_ENDPROC
16774-ENDPROC(atomic64_\func\()_return_cx8)
16775+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16776 .endm
16777
16778 incdec_return inc add adc
16779 incdec_return dec sub sbb
16780+incdec_return inc add adc _unchecked
16781+incdec_return dec sub sbb _unchecked
16782
16783 ENTRY(atomic64_dec_if_positive_cx8)
16784 CFI_STARTPROC
16785@@ -140,6 +215,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16786 movl %edx, %ecx
16787 subl $1, %ebx
16788 sbb $0, %ecx
16789+
16790+#ifdef CONFIG_PAX_REFCOUNT
16791+ into
16792+1234:
16793+ _ASM_EXTABLE(1234b, 2f)
16794+#endif
16795+
16796 js 2f
16797 LOCK_PREFIX
16798 cmpxchg8b (%esi)
16799@@ -149,6 +231,9 @@ ENTRY(atomic64_dec_if_positive_cx8)
16800 movl %ebx, %eax
16801 movl %ecx, %edx
16802 RESTORE ebx
16803+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16804+ orb $0x80, 0x7(%rsp)
16805+#endif
16806 ret
16807 CFI_ENDPROC
16808 ENDPROC(atomic64_dec_if_positive_cx8)
16809@@ -174,6 +259,13 @@ ENTRY(atomic64_add_unless_cx8)
16810 movl %edx, %ecx
16811 addl %esi, %ebx
16812 adcl %edi, %ecx
16813+
16814+#ifdef CONFIG_PAX_REFCOUNT
16815+ into
16816+1234:
16817+ _ASM_EXTABLE(1234b, 3f)
16818+#endif
16819+
16820 LOCK_PREFIX
16821 cmpxchg8b (%ebp)
16822 jne 1b
16823@@ -184,6 +276,9 @@ ENTRY(atomic64_add_unless_cx8)
16824 CFI_ADJUST_CFA_OFFSET -8
16825 RESTORE ebx
16826 RESTORE ebp
16827+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16828+ orb $0x80, 0x7(%rsp)
16829+#endif
16830 ret
16831 4:
16832 cmpl %edx, 4(%esp)
16833@@ -206,6 +301,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16834 movl %edx, %ecx
16835 addl $1, %ebx
16836 adcl $0, %ecx
16837+
16838+#ifdef CONFIG_PAX_REFCOUNT
16839+ into
16840+1234:
16841+ _ASM_EXTABLE(1234b, 3f)
16842+#endif
16843+
16844 LOCK_PREFIX
16845 cmpxchg8b (%esi)
16846 jne 1b
16847@@ -213,6 +315,9 @@ ENTRY(atomic64_inc_not_zero_cx8)
16848 movl $1, %eax
16849 3:
16850 RESTORE ebx
16851+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16852+ orb $0x80, 0x7(%rsp)
16853+#endif
16854 ret
16855 4:
16856 testl %edx, %edx
16857diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16858--- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16859+++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16860@@ -28,7 +28,8 @@
16861 #include <linux/linkage.h>
16862 #include <asm/dwarf2.h>
16863 #include <asm/errno.h>
16864-
16865+#include <asm/segment.h>
16866+
16867 /*
16868 * computes a partial checksum, e.g. for TCP/UDP fragments
16869 */
16870@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16871
16872 #define ARGBASE 16
16873 #define FP 12
16874-
16875-ENTRY(csum_partial_copy_generic)
16876+
16877+ENTRY(csum_partial_copy_generic_to_user)
16878 CFI_STARTPROC
16879+
16880+#ifdef CONFIG_PAX_MEMORY_UDEREF
16881+ pushl_cfi %gs
16882+ popl_cfi %es
16883+ jmp csum_partial_copy_generic
16884+#endif
16885+
16886+ENTRY(csum_partial_copy_generic_from_user)
16887+
16888+#ifdef CONFIG_PAX_MEMORY_UDEREF
16889+ pushl_cfi %gs
16890+ popl_cfi %ds
16891+#endif
16892+
16893+ENTRY(csum_partial_copy_generic)
16894 subl $4,%esp
16895 CFI_ADJUST_CFA_OFFSET 4
16896 pushl_cfi %edi
16897@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16898 jmp 4f
16899 SRC(1: movw (%esi), %bx )
16900 addl $2, %esi
16901-DST( movw %bx, (%edi) )
16902+DST( movw %bx, %es:(%edi) )
16903 addl $2, %edi
16904 addw %bx, %ax
16905 adcl $0, %eax
16906@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16907 SRC(1: movl (%esi), %ebx )
16908 SRC( movl 4(%esi), %edx )
16909 adcl %ebx, %eax
16910-DST( movl %ebx, (%edi) )
16911+DST( movl %ebx, %es:(%edi) )
16912 adcl %edx, %eax
16913-DST( movl %edx, 4(%edi) )
16914+DST( movl %edx, %es:4(%edi) )
16915
16916 SRC( movl 8(%esi), %ebx )
16917 SRC( movl 12(%esi), %edx )
16918 adcl %ebx, %eax
16919-DST( movl %ebx, 8(%edi) )
16920+DST( movl %ebx, %es:8(%edi) )
16921 adcl %edx, %eax
16922-DST( movl %edx, 12(%edi) )
16923+DST( movl %edx, %es:12(%edi) )
16924
16925 SRC( movl 16(%esi), %ebx )
16926 SRC( movl 20(%esi), %edx )
16927 adcl %ebx, %eax
16928-DST( movl %ebx, 16(%edi) )
16929+DST( movl %ebx, %es:16(%edi) )
16930 adcl %edx, %eax
16931-DST( movl %edx, 20(%edi) )
16932+DST( movl %edx, %es:20(%edi) )
16933
16934 SRC( movl 24(%esi), %ebx )
16935 SRC( movl 28(%esi), %edx )
16936 adcl %ebx, %eax
16937-DST( movl %ebx, 24(%edi) )
16938+DST( movl %ebx, %es:24(%edi) )
16939 adcl %edx, %eax
16940-DST( movl %edx, 28(%edi) )
16941+DST( movl %edx, %es:28(%edi) )
16942
16943 lea 32(%esi), %esi
16944 lea 32(%edi), %edi
16945@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16946 shrl $2, %edx # This clears CF
16947 SRC(3: movl (%esi), %ebx )
16948 adcl %ebx, %eax
16949-DST( movl %ebx, (%edi) )
16950+DST( movl %ebx, %es:(%edi) )
16951 lea 4(%esi), %esi
16952 lea 4(%edi), %edi
16953 dec %edx
16954@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16955 jb 5f
16956 SRC( movw (%esi), %cx )
16957 leal 2(%esi), %esi
16958-DST( movw %cx, (%edi) )
16959+DST( movw %cx, %es:(%edi) )
16960 leal 2(%edi), %edi
16961 je 6f
16962 shll $16,%ecx
16963 SRC(5: movb (%esi), %cl )
16964-DST( movb %cl, (%edi) )
16965+DST( movb %cl, %es:(%edi) )
16966 6: addl %ecx, %eax
16967 adcl $0, %eax
16968 7:
16969@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16970
16971 6001:
16972 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16973- movl $-EFAULT, (%ebx)
16974+ movl $-EFAULT, %ss:(%ebx)
16975
16976 # zero the complete destination - computing the rest
16977 # is too much work
16978@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16979
16980 6002:
16981 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16982- movl $-EFAULT,(%ebx)
16983+ movl $-EFAULT,%ss:(%ebx)
16984 jmp 5000b
16985
16986 .previous
16987
16988+ pushl_cfi %ss
16989+ popl_cfi %ds
16990+ pushl_cfi %ss
16991+ popl_cfi %es
16992 popl_cfi %ebx
16993 CFI_RESTORE ebx
16994 popl_cfi %esi
16995@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16996 popl_cfi %ecx # equivalent to addl $4,%esp
16997 ret
16998 CFI_ENDPROC
16999-ENDPROC(csum_partial_copy_generic)
17000+ENDPROC(csum_partial_copy_generic_to_user)
17001
17002 #else
17003
17004 /* Version for PentiumII/PPro */
17005
17006 #define ROUND1(x) \
17007+ nop; nop; nop; \
17008 SRC(movl x(%esi), %ebx ) ; \
17009 addl %ebx, %eax ; \
17010- DST(movl %ebx, x(%edi) ) ;
17011+ DST(movl %ebx, %es:x(%edi)) ;
17012
17013 #define ROUND(x) \
17014+ nop; nop; nop; \
17015 SRC(movl x(%esi), %ebx ) ; \
17016 adcl %ebx, %eax ; \
17017- DST(movl %ebx, x(%edi) ) ;
17018+ DST(movl %ebx, %es:x(%edi)) ;
17019
17020 #define ARGBASE 12
17021-
17022-ENTRY(csum_partial_copy_generic)
17023+
17024+ENTRY(csum_partial_copy_generic_to_user)
17025 CFI_STARTPROC
17026+
17027+#ifdef CONFIG_PAX_MEMORY_UDEREF
17028+ pushl_cfi %gs
17029+ popl_cfi %es
17030+ jmp csum_partial_copy_generic
17031+#endif
17032+
17033+ENTRY(csum_partial_copy_generic_from_user)
17034+
17035+#ifdef CONFIG_PAX_MEMORY_UDEREF
17036+ pushl_cfi %gs
17037+ popl_cfi %ds
17038+#endif
17039+
17040+ENTRY(csum_partial_copy_generic)
17041 pushl_cfi %ebx
17042 CFI_REL_OFFSET ebx, 0
17043 pushl_cfi %edi
17044@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17045 subl %ebx, %edi
17046 lea -1(%esi),%edx
17047 andl $-32,%edx
17048- lea 3f(%ebx,%ebx), %ebx
17049+ lea 3f(%ebx,%ebx,2), %ebx
17050 testl %esi, %esi
17051 jmp *%ebx
17052 1: addl $64,%esi
17053@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17054 jb 5f
17055 SRC( movw (%esi), %dx )
17056 leal 2(%esi), %esi
17057-DST( movw %dx, (%edi) )
17058+DST( movw %dx, %es:(%edi) )
17059 leal 2(%edi), %edi
17060 je 6f
17061 shll $16,%edx
17062 5:
17063 SRC( movb (%esi), %dl )
17064-DST( movb %dl, (%edi) )
17065+DST( movb %dl, %es:(%edi) )
17066 6: addl %edx, %eax
17067 adcl $0, %eax
17068 7:
17069 .section .fixup, "ax"
17070 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17071- movl $-EFAULT, (%ebx)
17072+ movl $-EFAULT, %ss:(%ebx)
17073 # zero the complete destination (computing the rest is too much work)
17074 movl ARGBASE+8(%esp),%edi # dst
17075 movl ARGBASE+12(%esp),%ecx # len
17076@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17077 rep; stosb
17078 jmp 7b
17079 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17080- movl $-EFAULT, (%ebx)
17081+ movl $-EFAULT, %ss:(%ebx)
17082 jmp 7b
17083 .previous
17084
17085+#ifdef CONFIG_PAX_MEMORY_UDEREF
17086+ pushl_cfi %ss
17087+ popl_cfi %ds
17088+ pushl_cfi %ss
17089+ popl_cfi %es
17090+#endif
17091+
17092 popl_cfi %esi
17093 CFI_RESTORE esi
17094 popl_cfi %edi
17095@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17096 CFI_RESTORE ebx
17097 ret
17098 CFI_ENDPROC
17099-ENDPROC(csum_partial_copy_generic)
17100+ENDPROC(csum_partial_copy_generic_to_user)
17101
17102 #undef ROUND
17103 #undef ROUND1
17104diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
17105--- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17106+++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-09-17 18:31:51.000000000 -0400
17107@@ -11,6 +11,9 @@ ENTRY(clear_page_c)
17108 movl $4096/8,%ecx
17109 xorl %eax,%eax
17110 rep stosq
17111+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17112+ orb $0x80, 0x7(%rsp)
17113+#endif
17114 ret
17115 CFI_ENDPROC
17116 ENDPROC(clear_page_c)
17117@@ -20,6 +23,9 @@ ENTRY(clear_page_c_e)
17118 movl $4096,%ecx
17119 xorl %eax,%eax
17120 rep stosb
17121+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17122+ orb $0x80, 0x7(%rsp)
17123+#endif
17124 ret
17125 CFI_ENDPROC
17126 ENDPROC(clear_page_c_e)
17127@@ -43,6 +49,9 @@ ENTRY(clear_page)
17128 leaq 64(%rdi),%rdi
17129 jnz .Lloop
17130 nop
17131+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17132+ orb $0x80, 0x7(%rsp)
17133+#endif
17134 ret
17135 CFI_ENDPROC
17136 .Lclear_page_end:
17137@@ -58,7 +67,7 @@ ENDPROC(clear_page)
17138
17139 #include <asm/cpufeature.h>
17140
17141- .section .altinstr_replacement,"ax"
17142+ .section .altinstr_replacement,"a"
17143 1: .byte 0xeb /* jmp <disp8> */
17144 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17145 2: .byte 0xeb /* jmp <disp8> */
17146diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
17147--- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17148+++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-09-17 18:31:51.000000000 -0400
17149@@ -8,6 +8,9 @@ copy_page_c:
17150 CFI_STARTPROC
17151 movl $4096/8,%ecx
17152 rep movsq
17153+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17154+ orb $0x80, 0x7(%rsp)
17155+#endif
17156 ret
17157 CFI_ENDPROC
17158 ENDPROC(copy_page_c)
17159@@ -94,6 +97,9 @@ ENTRY(copy_page)
17160 CFI_RESTORE r13
17161 addq $3*8,%rsp
17162 CFI_ADJUST_CFA_OFFSET -3*8
17163+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17164+ orb $0x80, 0x7(%rsp)
17165+#endif
17166 ret
17167 .Lcopy_page_end:
17168 CFI_ENDPROC
17169@@ -104,7 +110,7 @@ ENDPROC(copy_page)
17170
17171 #include <asm/cpufeature.h>
17172
17173- .section .altinstr_replacement,"ax"
17174+ .section .altinstr_replacement,"a"
17175 1: .byte 0xeb /* jmp <disp8> */
17176 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17177 2:
17178diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
17179--- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17180+++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-09-17 18:31:51.000000000 -0400
17181@@ -16,6 +16,7 @@
17182 #include <asm/thread_info.h>
17183 #include <asm/cpufeature.h>
17184 #include <asm/alternative-asm.h>
17185+#include <asm/pgtable.h>
17186
17187 /*
17188 * By placing feature2 after feature1 in altinstructions section, we logically
17189@@ -29,7 +30,7 @@
17190 .byte 0xe9 /* 32bit jump */
17191 .long \orig-1f /* by default jump to orig */
17192 1:
17193- .section .altinstr_replacement,"ax"
17194+ .section .altinstr_replacement,"a"
17195 2: .byte 0xe9 /* near jump with 32bit immediate */
17196 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17197 3: .byte 0xe9 /* near jump with 32bit immediate */
17198@@ -71,47 +72,22 @@
17199 #endif
17200 .endm
17201
17202-/* Standard copy_to_user with segment limit checking */
17203-ENTRY(_copy_to_user)
17204- CFI_STARTPROC
17205- GET_THREAD_INFO(%rax)
17206- movq %rdi,%rcx
17207- addq %rdx,%rcx
17208- jc bad_to_user
17209- cmpq TI_addr_limit(%rax),%rcx
17210- ja bad_to_user
17211- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17212- copy_user_generic_unrolled,copy_user_generic_string, \
17213- copy_user_enhanced_fast_string
17214- CFI_ENDPROC
17215-ENDPROC(_copy_to_user)
17216-
17217-/* Standard copy_from_user with segment limit checking */
17218-ENTRY(_copy_from_user)
17219- CFI_STARTPROC
17220- GET_THREAD_INFO(%rax)
17221- movq %rsi,%rcx
17222- addq %rdx,%rcx
17223- jc bad_from_user
17224- cmpq TI_addr_limit(%rax),%rcx
17225- ja bad_from_user
17226- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17227- copy_user_generic_unrolled,copy_user_generic_string, \
17228- copy_user_enhanced_fast_string
17229- CFI_ENDPROC
17230-ENDPROC(_copy_from_user)
17231-
17232 .section .fixup,"ax"
17233 /* must zero dest */
17234 ENTRY(bad_from_user)
17235 bad_from_user:
17236 CFI_STARTPROC
17237+ testl %edx,%edx
17238+ js bad_to_user
17239 movl %edx,%ecx
17240 xorl %eax,%eax
17241 rep
17242 stosb
17243 bad_to_user:
17244 movl %edx,%eax
17245+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17246+ orb $0x80, 0x7(%rsp)
17247+#endif
17248 ret
17249 CFI_ENDPROC
17250 ENDPROC(bad_from_user)
17251@@ -179,6 +155,9 @@ ENTRY(copy_user_generic_unrolled)
17252 decl %ecx
17253 jnz 21b
17254 23: xor %eax,%eax
17255+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17256+ orb $0x80, 0x7(%rsp)
17257+#endif
17258 ret
17259
17260 .section .fixup,"ax"
17261@@ -251,6 +230,9 @@ ENTRY(copy_user_generic_string)
17262 3: rep
17263 movsb
17264 4: xorl %eax,%eax
17265+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17266+ orb $0x80, 0x7(%rsp)
17267+#endif
17268 ret
17269
17270 .section .fixup,"ax"
17271@@ -287,6 +269,9 @@ ENTRY(copy_user_enhanced_fast_string)
17272 1: rep
17273 movsb
17274 2: xorl %eax,%eax
17275+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17276+ orb $0x80, 0x7(%rsp)
17277+#endif
17278 ret
17279
17280 .section .fixup,"ax"
17281diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17282--- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17283+++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-09-17 18:31:51.000000000 -0400
17284@@ -14,6 +14,7 @@
17285 #include <asm/current.h>
17286 #include <asm/asm-offsets.h>
17287 #include <asm/thread_info.h>
17288+#include <asm/pgtable.h>
17289
17290 .macro ALIGN_DESTINATION
17291 #ifdef FIX_ALIGNMENT
17292@@ -50,6 +51,15 @@
17293 */
17294 ENTRY(__copy_user_nocache)
17295 CFI_STARTPROC
17296+
17297+#ifdef CONFIG_PAX_MEMORY_UDEREF
17298+ mov $PAX_USER_SHADOW_BASE,%rcx
17299+ cmp %rcx,%rsi
17300+ jae 1f
17301+ add %rcx,%rsi
17302+1:
17303+#endif
17304+
17305 cmpl $8,%edx
17306 jb 20f /* less then 8 bytes, go to byte copy loop */
17307 ALIGN_DESTINATION
17308@@ -98,6 +108,9 @@ ENTRY(__copy_user_nocache)
17309 jnz 21b
17310 23: xorl %eax,%eax
17311 sfence
17312+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17313+ orb $0x80, 0x7(%rsp)
17314+#endif
17315 ret
17316
17317 .section .fixup,"ax"
17318diff -urNp linux-3.0.4/arch/x86/lib/csum-copy_64.S linux-3.0.4/arch/x86/lib/csum-copy_64.S
17319--- linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17320+++ linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-09-17 18:31:51.000000000 -0400
17321@@ -228,6 +228,9 @@ ENTRY(csum_partial_copy_generic)
17322 CFI_RESTORE rbp
17323 addq $7*8, %rsp
17324 CFI_ADJUST_CFA_OFFSET -7*8
17325+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17326+ orb $0x80, 0x7(%rsp)
17327+#endif
17328 ret
17329 CFI_RESTORE_STATE
17330
17331diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17332--- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17333+++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17334@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17335 len -= 2;
17336 }
17337 }
17338+
17339+#ifdef CONFIG_PAX_MEMORY_UDEREF
17340+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17341+ src += PAX_USER_SHADOW_BASE;
17342+#endif
17343+
17344 isum = csum_partial_copy_generic((__force const void *)src,
17345 dst, len, isum, errp, NULL);
17346 if (unlikely(*errp))
17347@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17348 }
17349
17350 *errp = 0;
17351+
17352+#ifdef CONFIG_PAX_MEMORY_UDEREF
17353+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17354+ dst += PAX_USER_SHADOW_BASE;
17355+#endif
17356+
17357 return csum_partial_copy_generic(src, (void __force *)dst,
17358 len, isum, NULL, errp);
17359 }
17360diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17361--- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17362+++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17363@@ -33,14 +33,35 @@
17364 #include <asm/asm-offsets.h>
17365 #include <asm/thread_info.h>
17366 #include <asm/asm.h>
17367+#include <asm/segment.h>
17368+#include <asm/pgtable.h>
17369+
17370+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17371+#define __copyuser_seg gs;
17372+#else
17373+#define __copyuser_seg
17374+#endif
17375
17376 .text
17377 ENTRY(__get_user_1)
17378 CFI_STARTPROC
17379+
17380+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17381 GET_THREAD_INFO(%_ASM_DX)
17382 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17383 jae bad_get_user
17384-1: movzb (%_ASM_AX),%edx
17385+
17386+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17387+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17388+ cmp %_ASM_DX,%_ASM_AX
17389+ jae 1234f
17390+ add %_ASM_DX,%_ASM_AX
17391+1234:
17392+#endif
17393+
17394+#endif
17395+
17396+1: __copyuser_seg movzb (%_ASM_AX),%edx
17397 xor %eax,%eax
17398 ret
17399 CFI_ENDPROC
17400@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17401 ENTRY(__get_user_2)
17402 CFI_STARTPROC
17403 add $1,%_ASM_AX
17404+
17405+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17406 jc bad_get_user
17407 GET_THREAD_INFO(%_ASM_DX)
17408 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17409 jae bad_get_user
17410-2: movzwl -1(%_ASM_AX),%edx
17411+
17412+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17413+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17414+ cmp %_ASM_DX,%_ASM_AX
17415+ jae 1234f
17416+ add %_ASM_DX,%_ASM_AX
17417+1234:
17418+#endif
17419+
17420+#endif
17421+
17422+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17423 xor %eax,%eax
17424 ret
17425 CFI_ENDPROC
17426@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17427 ENTRY(__get_user_4)
17428 CFI_STARTPROC
17429 add $3,%_ASM_AX
17430+
17431+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17432 jc bad_get_user
17433 GET_THREAD_INFO(%_ASM_DX)
17434 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17435 jae bad_get_user
17436-3: mov -3(%_ASM_AX),%edx
17437+
17438+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17439+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17440+ cmp %_ASM_DX,%_ASM_AX
17441+ jae 1234f
17442+ add %_ASM_DX,%_ASM_AX
17443+1234:
17444+#endif
17445+
17446+#endif
17447+
17448+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17449 xor %eax,%eax
17450 ret
17451 CFI_ENDPROC
17452@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17453 GET_THREAD_INFO(%_ASM_DX)
17454 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17455 jae bad_get_user
17456+
17457+#ifdef CONFIG_PAX_MEMORY_UDEREF
17458+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17459+ cmp %_ASM_DX,%_ASM_AX
17460+ jae 1234f
17461+ add %_ASM_DX,%_ASM_AX
17462+1234:
17463+#endif
17464+
17465 4: movq -7(%_ASM_AX),%_ASM_DX
17466 xor %eax,%eax
17467 ret
17468diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17469--- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17470+++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17471@@ -21,6 +21,11 @@
17472 #include <linux/string.h>
17473 #include <asm/inat.h>
17474 #include <asm/insn.h>
17475+#ifdef __KERNEL__
17476+#include <asm/pgtable_types.h>
17477+#else
17478+#define ktla_ktva(addr) addr
17479+#endif
17480
17481 #define get_next(t, insn) \
17482 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17483@@ -40,8 +45,8 @@
17484 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17485 {
17486 memset(insn, 0, sizeof(*insn));
17487- insn->kaddr = kaddr;
17488- insn->next_byte = kaddr;
17489+ insn->kaddr = ktla_ktva(kaddr);
17490+ insn->next_byte = ktla_ktva(kaddr);
17491 insn->x86_64 = x86_64 ? 1 : 0;
17492 insn->opnd_bytes = 4;
17493 if (x86_64)
17494diff -urNp linux-3.0.4/arch/x86/lib/iomap_copy_64.S linux-3.0.4/arch/x86/lib/iomap_copy_64.S
17495--- linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17496+++ linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-09-17 18:31:51.000000000 -0400
17497@@ -25,6 +25,9 @@ ENTRY(__iowrite32_copy)
17498 CFI_STARTPROC
17499 movl %edx,%ecx
17500 rep movsd
17501+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17502+ orb $0x80, 0x7(%rsp)
17503+#endif
17504 ret
17505 CFI_ENDPROC
17506 ENDPROC(__iowrite32_copy)
17507diff -urNp linux-3.0.4/arch/x86/lib/memcpy_64.S linux-3.0.4/arch/x86/lib/memcpy_64.S
17508--- linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17509+++ linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-09-17 18:31:51.000000000 -0400
17510@@ -34,6 +34,9 @@
17511 rep movsq
17512 movl %edx, %ecx
17513 rep movsb
17514+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17515+ orb $0x80, 0x7(%rsp)
17516+#endif
17517 ret
17518 .Lmemcpy_e:
17519 .previous
17520@@ -51,6 +54,9 @@
17521
17522 movl %edx, %ecx
17523 rep movsb
17524+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17525+ orb $0x80, 0x7(%rsp)
17526+#endif
17527 ret
17528 .Lmemcpy_e_e:
17529 .previous
17530@@ -141,6 +147,9 @@ ENTRY(memcpy)
17531 movq %r9, 1*8(%rdi)
17532 movq %r10, -2*8(%rdi, %rdx)
17533 movq %r11, -1*8(%rdi, %rdx)
17534+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17535+ orb $0x80, 0x7(%rsp)
17536+#endif
17537 retq
17538 .p2align 4
17539 .Lless_16bytes:
17540@@ -153,6 +162,9 @@ ENTRY(memcpy)
17541 movq -1*8(%rsi, %rdx), %r9
17542 movq %r8, 0*8(%rdi)
17543 movq %r9, -1*8(%rdi, %rdx)
17544+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17545+ orb $0x80, 0x7(%rsp)
17546+#endif
17547 retq
17548 .p2align 4
17549 .Lless_8bytes:
17550@@ -166,6 +178,9 @@ ENTRY(memcpy)
17551 movl -4(%rsi, %rdx), %r8d
17552 movl %ecx, (%rdi)
17553 movl %r8d, -4(%rdi, %rdx)
17554+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17555+ orb $0x80, 0x7(%rsp)
17556+#endif
17557 retq
17558 .p2align 4
17559 .Lless_3bytes:
17560@@ -183,6 +198,9 @@ ENTRY(memcpy)
17561 jnz .Lloop_1
17562
17563 .Lend:
17564+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17565+ orb $0x80, 0x7(%rsp)
17566+#endif
17567 retq
17568 CFI_ENDPROC
17569 ENDPROC(memcpy)
17570diff -urNp linux-3.0.4/arch/x86/lib/memmove_64.S linux-3.0.4/arch/x86/lib/memmove_64.S
17571--- linux-3.0.4/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17572+++ linux-3.0.4/arch/x86/lib/memmove_64.S 2011-09-17 18:31:51.000000000 -0400
17573@@ -201,6 +201,9 @@ ENTRY(memmove)
17574 movb (%rsi), %r11b
17575 movb %r11b, (%rdi)
17576 13:
17577+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17578+ orb $0x80, 0x7(%rsp)
17579+#endif
17580 retq
17581 CFI_ENDPROC
17582
17583@@ -209,6 +212,9 @@ ENTRY(memmove)
17584 /* Forward moving data. */
17585 movq %rdx, %rcx
17586 rep movsb
17587+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17588+ orb $0x80, 0x7(%rsp)
17589+#endif
17590 retq
17591 .Lmemmove_end_forward_efs:
17592 .previous
17593diff -urNp linux-3.0.4/arch/x86/lib/memset_64.S linux-3.0.4/arch/x86/lib/memset_64.S
17594--- linux-3.0.4/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17595+++ linux-3.0.4/arch/x86/lib/memset_64.S 2011-09-17 18:31:51.000000000 -0400
17596@@ -31,6 +31,9 @@
17597 movl %r8d,%ecx
17598 rep stosb
17599 movq %r9,%rax
17600+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17601+ orb $0x80, 0x7(%rsp)
17602+#endif
17603 ret
17604 .Lmemset_e:
17605 .previous
17606@@ -53,6 +56,9 @@
17607 movl %edx,%ecx
17608 rep stosb
17609 movq %r9,%rax
17610+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17611+ orb $0x80, 0x7(%rsp)
17612+#endif
17613 ret
17614 .Lmemset_e_e:
17615 .previous
17616@@ -121,6 +127,9 @@ ENTRY(__memset)
17617
17618 .Lende:
17619 movq %r10,%rax
17620+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17621+ orb $0x80, 0x7(%rsp)
17622+#endif
17623 ret
17624
17625 CFI_RESTORE_STATE
17626diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17627--- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17628+++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17629@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17630 {
17631 void *p;
17632 int i;
17633+ unsigned long cr0;
17634
17635 if (unlikely(in_interrupt()))
17636 return __memcpy(to, from, len);
17637@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17638 kernel_fpu_begin();
17639
17640 __asm__ __volatile__ (
17641- "1: prefetch (%0)\n" /* This set is 28 bytes */
17642- " prefetch 64(%0)\n"
17643- " prefetch 128(%0)\n"
17644- " prefetch 192(%0)\n"
17645- " prefetch 256(%0)\n"
17646+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17647+ " prefetch 64(%1)\n"
17648+ " prefetch 128(%1)\n"
17649+ " prefetch 192(%1)\n"
17650+ " prefetch 256(%1)\n"
17651 "2: \n"
17652 ".section .fixup, \"ax\"\n"
17653- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17654+ "3: \n"
17655+
17656+#ifdef CONFIG_PAX_KERNEXEC
17657+ " movl %%cr0, %0\n"
17658+ " movl %0, %%eax\n"
17659+ " andl $0xFFFEFFFF, %%eax\n"
17660+ " movl %%eax, %%cr0\n"
17661+#endif
17662+
17663+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17664+
17665+#ifdef CONFIG_PAX_KERNEXEC
17666+ " movl %0, %%cr0\n"
17667+#endif
17668+
17669 " jmp 2b\n"
17670 ".previous\n"
17671 _ASM_EXTABLE(1b, 3b)
17672- : : "r" (from));
17673+ : "=&r" (cr0) : "r" (from) : "ax");
17674
17675 for ( ; i > 5; i--) {
17676 __asm__ __volatile__ (
17677- "1: prefetch 320(%0)\n"
17678- "2: movq (%0), %%mm0\n"
17679- " movq 8(%0), %%mm1\n"
17680- " movq 16(%0), %%mm2\n"
17681- " movq 24(%0), %%mm3\n"
17682- " movq %%mm0, (%1)\n"
17683- " movq %%mm1, 8(%1)\n"
17684- " movq %%mm2, 16(%1)\n"
17685- " movq %%mm3, 24(%1)\n"
17686- " movq 32(%0), %%mm0\n"
17687- " movq 40(%0), %%mm1\n"
17688- " movq 48(%0), %%mm2\n"
17689- " movq 56(%0), %%mm3\n"
17690- " movq %%mm0, 32(%1)\n"
17691- " movq %%mm1, 40(%1)\n"
17692- " movq %%mm2, 48(%1)\n"
17693- " movq %%mm3, 56(%1)\n"
17694+ "1: prefetch 320(%1)\n"
17695+ "2: movq (%1), %%mm0\n"
17696+ " movq 8(%1), %%mm1\n"
17697+ " movq 16(%1), %%mm2\n"
17698+ " movq 24(%1), %%mm3\n"
17699+ " movq %%mm0, (%2)\n"
17700+ " movq %%mm1, 8(%2)\n"
17701+ " movq %%mm2, 16(%2)\n"
17702+ " movq %%mm3, 24(%2)\n"
17703+ " movq 32(%1), %%mm0\n"
17704+ " movq 40(%1), %%mm1\n"
17705+ " movq 48(%1), %%mm2\n"
17706+ " movq 56(%1), %%mm3\n"
17707+ " movq %%mm0, 32(%2)\n"
17708+ " movq %%mm1, 40(%2)\n"
17709+ " movq %%mm2, 48(%2)\n"
17710+ " movq %%mm3, 56(%2)\n"
17711 ".section .fixup, \"ax\"\n"
17712- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17713+ "3:\n"
17714+
17715+#ifdef CONFIG_PAX_KERNEXEC
17716+ " movl %%cr0, %0\n"
17717+ " movl %0, %%eax\n"
17718+ " andl $0xFFFEFFFF, %%eax\n"
17719+ " movl %%eax, %%cr0\n"
17720+#endif
17721+
17722+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17723+
17724+#ifdef CONFIG_PAX_KERNEXEC
17725+ " movl %0, %%cr0\n"
17726+#endif
17727+
17728 " jmp 2b\n"
17729 ".previous\n"
17730 _ASM_EXTABLE(1b, 3b)
17731- : : "r" (from), "r" (to) : "memory");
17732+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17733
17734 from += 64;
17735 to += 64;
17736@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17737 static void fast_copy_page(void *to, void *from)
17738 {
17739 int i;
17740+ unsigned long cr0;
17741
17742 kernel_fpu_begin();
17743
17744@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17745 * but that is for later. -AV
17746 */
17747 __asm__ __volatile__(
17748- "1: prefetch (%0)\n"
17749- " prefetch 64(%0)\n"
17750- " prefetch 128(%0)\n"
17751- " prefetch 192(%0)\n"
17752- " prefetch 256(%0)\n"
17753+ "1: prefetch (%1)\n"
17754+ " prefetch 64(%1)\n"
17755+ " prefetch 128(%1)\n"
17756+ " prefetch 192(%1)\n"
17757+ " prefetch 256(%1)\n"
17758 "2: \n"
17759 ".section .fixup, \"ax\"\n"
17760- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17761+ "3: \n"
17762+
17763+#ifdef CONFIG_PAX_KERNEXEC
17764+ " movl %%cr0, %0\n"
17765+ " movl %0, %%eax\n"
17766+ " andl $0xFFFEFFFF, %%eax\n"
17767+ " movl %%eax, %%cr0\n"
17768+#endif
17769+
17770+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17771+
17772+#ifdef CONFIG_PAX_KERNEXEC
17773+ " movl %0, %%cr0\n"
17774+#endif
17775+
17776 " jmp 2b\n"
17777 ".previous\n"
17778- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17779+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17780
17781 for (i = 0; i < (4096-320)/64; i++) {
17782 __asm__ __volatile__ (
17783- "1: prefetch 320(%0)\n"
17784- "2: movq (%0), %%mm0\n"
17785- " movntq %%mm0, (%1)\n"
17786- " movq 8(%0), %%mm1\n"
17787- " movntq %%mm1, 8(%1)\n"
17788- " movq 16(%0), %%mm2\n"
17789- " movntq %%mm2, 16(%1)\n"
17790- " movq 24(%0), %%mm3\n"
17791- " movntq %%mm3, 24(%1)\n"
17792- " movq 32(%0), %%mm4\n"
17793- " movntq %%mm4, 32(%1)\n"
17794- " movq 40(%0), %%mm5\n"
17795- " movntq %%mm5, 40(%1)\n"
17796- " movq 48(%0), %%mm6\n"
17797- " movntq %%mm6, 48(%1)\n"
17798- " movq 56(%0), %%mm7\n"
17799- " movntq %%mm7, 56(%1)\n"
17800+ "1: prefetch 320(%1)\n"
17801+ "2: movq (%1), %%mm0\n"
17802+ " movntq %%mm0, (%2)\n"
17803+ " movq 8(%1), %%mm1\n"
17804+ " movntq %%mm1, 8(%2)\n"
17805+ " movq 16(%1), %%mm2\n"
17806+ " movntq %%mm2, 16(%2)\n"
17807+ " movq 24(%1), %%mm3\n"
17808+ " movntq %%mm3, 24(%2)\n"
17809+ " movq 32(%1), %%mm4\n"
17810+ " movntq %%mm4, 32(%2)\n"
17811+ " movq 40(%1), %%mm5\n"
17812+ " movntq %%mm5, 40(%2)\n"
17813+ " movq 48(%1), %%mm6\n"
17814+ " movntq %%mm6, 48(%2)\n"
17815+ " movq 56(%1), %%mm7\n"
17816+ " movntq %%mm7, 56(%2)\n"
17817 ".section .fixup, \"ax\"\n"
17818- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17819+ "3:\n"
17820+
17821+#ifdef CONFIG_PAX_KERNEXEC
17822+ " movl %%cr0, %0\n"
17823+ " movl %0, %%eax\n"
17824+ " andl $0xFFFEFFFF, %%eax\n"
17825+ " movl %%eax, %%cr0\n"
17826+#endif
17827+
17828+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17829+
17830+#ifdef CONFIG_PAX_KERNEXEC
17831+ " movl %0, %%cr0\n"
17832+#endif
17833+
17834 " jmp 2b\n"
17835 ".previous\n"
17836- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17837+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17838
17839 from += 64;
17840 to += 64;
17841@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17842 static void fast_copy_page(void *to, void *from)
17843 {
17844 int i;
17845+ unsigned long cr0;
17846
17847 kernel_fpu_begin();
17848
17849 __asm__ __volatile__ (
17850- "1: prefetch (%0)\n"
17851- " prefetch 64(%0)\n"
17852- " prefetch 128(%0)\n"
17853- " prefetch 192(%0)\n"
17854- " prefetch 256(%0)\n"
17855+ "1: prefetch (%1)\n"
17856+ " prefetch 64(%1)\n"
17857+ " prefetch 128(%1)\n"
17858+ " prefetch 192(%1)\n"
17859+ " prefetch 256(%1)\n"
17860 "2: \n"
17861 ".section .fixup, \"ax\"\n"
17862- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17863+ "3: \n"
17864+
17865+#ifdef CONFIG_PAX_KERNEXEC
17866+ " movl %%cr0, %0\n"
17867+ " movl %0, %%eax\n"
17868+ " andl $0xFFFEFFFF, %%eax\n"
17869+ " movl %%eax, %%cr0\n"
17870+#endif
17871+
17872+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17873+
17874+#ifdef CONFIG_PAX_KERNEXEC
17875+ " movl %0, %%cr0\n"
17876+#endif
17877+
17878 " jmp 2b\n"
17879 ".previous\n"
17880- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17881+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17882
17883 for (i = 0; i < 4096/64; i++) {
17884 __asm__ __volatile__ (
17885- "1: prefetch 320(%0)\n"
17886- "2: movq (%0), %%mm0\n"
17887- " movq 8(%0), %%mm1\n"
17888- " movq 16(%0), %%mm2\n"
17889- " movq 24(%0), %%mm3\n"
17890- " movq %%mm0, (%1)\n"
17891- " movq %%mm1, 8(%1)\n"
17892- " movq %%mm2, 16(%1)\n"
17893- " movq %%mm3, 24(%1)\n"
17894- " movq 32(%0), %%mm0\n"
17895- " movq 40(%0), %%mm1\n"
17896- " movq 48(%0), %%mm2\n"
17897- " movq 56(%0), %%mm3\n"
17898- " movq %%mm0, 32(%1)\n"
17899- " movq %%mm1, 40(%1)\n"
17900- " movq %%mm2, 48(%1)\n"
17901- " movq %%mm3, 56(%1)\n"
17902+ "1: prefetch 320(%1)\n"
17903+ "2: movq (%1), %%mm0\n"
17904+ " movq 8(%1), %%mm1\n"
17905+ " movq 16(%1), %%mm2\n"
17906+ " movq 24(%1), %%mm3\n"
17907+ " movq %%mm0, (%2)\n"
17908+ " movq %%mm1, 8(%2)\n"
17909+ " movq %%mm2, 16(%2)\n"
17910+ " movq %%mm3, 24(%2)\n"
17911+ " movq 32(%1), %%mm0\n"
17912+ " movq 40(%1), %%mm1\n"
17913+ " movq 48(%1), %%mm2\n"
17914+ " movq 56(%1), %%mm3\n"
17915+ " movq %%mm0, 32(%2)\n"
17916+ " movq %%mm1, 40(%2)\n"
17917+ " movq %%mm2, 48(%2)\n"
17918+ " movq %%mm3, 56(%2)\n"
17919 ".section .fixup, \"ax\"\n"
17920- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17921+ "3:\n"
17922+
17923+#ifdef CONFIG_PAX_KERNEXEC
17924+ " movl %%cr0, %0\n"
17925+ " movl %0, %%eax\n"
17926+ " andl $0xFFFEFFFF, %%eax\n"
17927+ " movl %%eax, %%cr0\n"
17928+#endif
17929+
17930+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17931+
17932+#ifdef CONFIG_PAX_KERNEXEC
17933+ " movl %0, %%cr0\n"
17934+#endif
17935+
17936 " jmp 2b\n"
17937 ".previous\n"
17938 _ASM_EXTABLE(1b, 3b)
17939- : : "r" (from), "r" (to) : "memory");
17940+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17941
17942 from += 64;
17943 to += 64;
17944diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17945--- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17946+++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17947@@ -15,7 +15,8 @@
17948 #include <asm/thread_info.h>
17949 #include <asm/errno.h>
17950 #include <asm/asm.h>
17951-
17952+#include <asm/segment.h>
17953+#include <asm/pgtable.h>
17954
17955 /*
17956 * __put_user_X
17957@@ -29,52 +30,119 @@
17958 * as they get called from within inline assembly.
17959 */
17960
17961-#define ENTER CFI_STARTPROC ; \
17962- GET_THREAD_INFO(%_ASM_BX)
17963+#define ENTER CFI_STARTPROC
17964 #define EXIT ret ; \
17965 CFI_ENDPROC
17966
17967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17968+#define _DEST %_ASM_CX,%_ASM_BX
17969+#else
17970+#define _DEST %_ASM_CX
17971+#endif
17972+
17973+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17974+#define __copyuser_seg gs;
17975+#else
17976+#define __copyuser_seg
17977+#endif
17978+
17979 .text
17980 ENTRY(__put_user_1)
17981 ENTER
17982+
17983+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17984+ GET_THREAD_INFO(%_ASM_BX)
17985 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17986 jae bad_put_user
17987-1: movb %al,(%_ASM_CX)
17988+
17989+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17990+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17991+ cmp %_ASM_BX,%_ASM_CX
17992+ jb 1234f
17993+ xor %ebx,%ebx
17994+1234:
17995+#endif
17996+
17997+#endif
17998+
17999+1: __copyuser_seg movb %al,(_DEST)
18000 xor %eax,%eax
18001 EXIT
18002 ENDPROC(__put_user_1)
18003
18004 ENTRY(__put_user_2)
18005 ENTER
18006+
18007+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18008+ GET_THREAD_INFO(%_ASM_BX)
18009 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18010 sub $1,%_ASM_BX
18011 cmp %_ASM_BX,%_ASM_CX
18012 jae bad_put_user
18013-2: movw %ax,(%_ASM_CX)
18014+
18015+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18016+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18017+ cmp %_ASM_BX,%_ASM_CX
18018+ jb 1234f
18019+ xor %ebx,%ebx
18020+1234:
18021+#endif
18022+
18023+#endif
18024+
18025+2: __copyuser_seg movw %ax,(_DEST)
18026 xor %eax,%eax
18027 EXIT
18028 ENDPROC(__put_user_2)
18029
18030 ENTRY(__put_user_4)
18031 ENTER
18032+
18033+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18034+ GET_THREAD_INFO(%_ASM_BX)
18035 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18036 sub $3,%_ASM_BX
18037 cmp %_ASM_BX,%_ASM_CX
18038 jae bad_put_user
18039-3: movl %eax,(%_ASM_CX)
18040+
18041+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18042+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18043+ cmp %_ASM_BX,%_ASM_CX
18044+ jb 1234f
18045+ xor %ebx,%ebx
18046+1234:
18047+#endif
18048+
18049+#endif
18050+
18051+3: __copyuser_seg movl %eax,(_DEST)
18052 xor %eax,%eax
18053 EXIT
18054 ENDPROC(__put_user_4)
18055
18056 ENTRY(__put_user_8)
18057 ENTER
18058+
18059+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18060+ GET_THREAD_INFO(%_ASM_BX)
18061 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18062 sub $7,%_ASM_BX
18063 cmp %_ASM_BX,%_ASM_CX
18064 jae bad_put_user
18065-4: mov %_ASM_AX,(%_ASM_CX)
18066+
18067+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18068+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18069+ cmp %_ASM_BX,%_ASM_CX
18070+ jb 1234f
18071+ xor %ebx,%ebx
18072+1234:
18073+#endif
18074+
18075+#endif
18076+
18077+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18078 #ifdef CONFIG_X86_32
18079-5: movl %edx,4(%_ASM_CX)
18080+5: __copyuser_seg movl %edx,4(_DEST)
18081 #endif
18082 xor %eax,%eax
18083 EXIT
18084diff -urNp linux-3.0.4/arch/x86/lib/rwlock_64.S linux-3.0.4/arch/x86/lib/rwlock_64.S
18085--- linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18086+++ linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-09-17 18:31:51.000000000 -0400
18087@@ -17,6 +17,9 @@ ENTRY(__write_lock_failed)
18088 LOCK_PREFIX
18089 subl $RW_LOCK_BIAS,(%rdi)
18090 jnz __write_lock_failed
18091+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18092+ orb $0x80, 0x7(%rsp)
18093+#endif
18094 ret
18095 CFI_ENDPROC
18096 END(__write_lock_failed)
18097@@ -33,6 +36,9 @@ ENTRY(__read_lock_failed)
18098 LOCK_PREFIX
18099 decl (%rdi)
18100 js __read_lock_failed
18101+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18102+ orb $0x80, 0x7(%rsp)
18103+#endif
18104 ret
18105 CFI_ENDPROC
18106 END(__read_lock_failed)
18107diff -urNp linux-3.0.4/arch/x86/lib/rwsem_64.S linux-3.0.4/arch/x86/lib/rwsem_64.S
18108--- linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18109+++ linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-09-17 18:31:51.000000000 -0400
18110@@ -51,6 +51,9 @@ ENTRY(call_rwsem_down_read_failed)
18111 popq_cfi %rdx
18112 CFI_RESTORE rdx
18113 restore_common_regs
18114+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18115+ orb $0x80, 0x7(%rsp)
18116+#endif
18117 ret
18118 CFI_ENDPROC
18119 ENDPROC(call_rwsem_down_read_failed)
18120@@ -61,6 +64,9 @@ ENTRY(call_rwsem_down_write_failed)
18121 movq %rax,%rdi
18122 call rwsem_down_write_failed
18123 restore_common_regs
18124+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18125+ orb $0x80, 0x7(%rsp)
18126+#endif
18127 ret
18128 CFI_ENDPROC
18129 ENDPROC(call_rwsem_down_write_failed)
18130@@ -73,6 +79,9 @@ ENTRY(call_rwsem_wake)
18131 movq %rax,%rdi
18132 call rwsem_wake
18133 restore_common_regs
18134+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18135+ orb $0x80, 0x7(%rsp)
18136+#endif
18137 1: ret
18138 CFI_ENDPROC
18139 ENDPROC(call_rwsem_wake)
18140@@ -88,6 +97,9 @@ ENTRY(call_rwsem_downgrade_wake)
18141 popq_cfi %rdx
18142 CFI_RESTORE rdx
18143 restore_common_regs
18144+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18145+ orb $0x80, 0x7(%rsp)
18146+#endif
18147 ret
18148 CFI_ENDPROC
18149 ENDPROC(call_rwsem_downgrade_wake)
18150diff -urNp linux-3.0.4/arch/x86/lib/thunk_64.S linux-3.0.4/arch/x86/lib/thunk_64.S
18151--- linux-3.0.4/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18152+++ linux-3.0.4/arch/x86/lib/thunk_64.S 2011-09-17 18:31:51.000000000 -0400
18153@@ -50,5 +50,8 @@
18154 SAVE_ARGS
18155 restore:
18156 RESTORE_ARGS
18157+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18158+ orb $0x80, 0x7(%rsp)
18159+#endif
18160 ret
18161 CFI_ENDPROC
18162diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
18163--- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18164+++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18165@@ -43,7 +43,7 @@ do { \
18166 __asm__ __volatile__( \
18167 " testl %1,%1\n" \
18168 " jz 2f\n" \
18169- "0: lodsb\n" \
18170+ "0: "__copyuser_seg"lodsb\n" \
18171 " stosb\n" \
18172 " testb %%al,%%al\n" \
18173 " jz 1f\n" \
18174@@ -128,10 +128,12 @@ do { \
18175 int __d0; \
18176 might_fault(); \
18177 __asm__ __volatile__( \
18178+ __COPYUSER_SET_ES \
18179 "0: rep; stosl\n" \
18180 " movl %2,%0\n" \
18181 "1: rep; stosb\n" \
18182 "2:\n" \
18183+ __COPYUSER_RESTORE_ES \
18184 ".section .fixup,\"ax\"\n" \
18185 "3: lea 0(%2,%0,4),%0\n" \
18186 " jmp 2b\n" \
18187@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18188 might_fault();
18189
18190 __asm__ __volatile__(
18191+ __COPYUSER_SET_ES
18192 " testl %0, %0\n"
18193 " jz 3f\n"
18194 " andl %0,%%ecx\n"
18195@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18196 " subl %%ecx,%0\n"
18197 " addl %0,%%eax\n"
18198 "1:\n"
18199+ __COPYUSER_RESTORE_ES
18200 ".section .fixup,\"ax\"\n"
18201 "2: xorl %%eax,%%eax\n"
18202 " jmp 1b\n"
18203@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18204
18205 #ifdef CONFIG_X86_INTEL_USERCOPY
18206 static unsigned long
18207-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18208+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18209 {
18210 int d0, d1;
18211 __asm__ __volatile__(
18212@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18213 " .align 2,0x90\n"
18214 "3: movl 0(%4), %%eax\n"
18215 "4: movl 4(%4), %%edx\n"
18216- "5: movl %%eax, 0(%3)\n"
18217- "6: movl %%edx, 4(%3)\n"
18218+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18219+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18220 "7: movl 8(%4), %%eax\n"
18221 "8: movl 12(%4),%%edx\n"
18222- "9: movl %%eax, 8(%3)\n"
18223- "10: movl %%edx, 12(%3)\n"
18224+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18225+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18226 "11: movl 16(%4), %%eax\n"
18227 "12: movl 20(%4), %%edx\n"
18228- "13: movl %%eax, 16(%3)\n"
18229- "14: movl %%edx, 20(%3)\n"
18230+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18231+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18232 "15: movl 24(%4), %%eax\n"
18233 "16: movl 28(%4), %%edx\n"
18234- "17: movl %%eax, 24(%3)\n"
18235- "18: movl %%edx, 28(%3)\n"
18236+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18237+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18238 "19: movl 32(%4), %%eax\n"
18239 "20: movl 36(%4), %%edx\n"
18240- "21: movl %%eax, 32(%3)\n"
18241- "22: movl %%edx, 36(%3)\n"
18242+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18243+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18244 "23: movl 40(%4), %%eax\n"
18245 "24: movl 44(%4), %%edx\n"
18246- "25: movl %%eax, 40(%3)\n"
18247- "26: movl %%edx, 44(%3)\n"
18248+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18249+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18250 "27: movl 48(%4), %%eax\n"
18251 "28: movl 52(%4), %%edx\n"
18252- "29: movl %%eax, 48(%3)\n"
18253- "30: movl %%edx, 52(%3)\n"
18254+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18255+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18256 "31: movl 56(%4), %%eax\n"
18257 "32: movl 60(%4), %%edx\n"
18258- "33: movl %%eax, 56(%3)\n"
18259- "34: movl %%edx, 60(%3)\n"
18260+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18261+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18262 " addl $-64, %0\n"
18263 " addl $64, %4\n"
18264 " addl $64, %3\n"
18265@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18266 " shrl $2, %0\n"
18267 " andl $3, %%eax\n"
18268 " cld\n"
18269+ __COPYUSER_SET_ES
18270 "99: rep; movsl\n"
18271 "36: movl %%eax, %0\n"
18272 "37: rep; movsb\n"
18273 "100:\n"
18274+ __COPYUSER_RESTORE_ES
18275+ ".section .fixup,\"ax\"\n"
18276+ "101: lea 0(%%eax,%0,4),%0\n"
18277+ " jmp 100b\n"
18278+ ".previous\n"
18279+ ".section __ex_table,\"a\"\n"
18280+ " .align 4\n"
18281+ " .long 1b,100b\n"
18282+ " .long 2b,100b\n"
18283+ " .long 3b,100b\n"
18284+ " .long 4b,100b\n"
18285+ " .long 5b,100b\n"
18286+ " .long 6b,100b\n"
18287+ " .long 7b,100b\n"
18288+ " .long 8b,100b\n"
18289+ " .long 9b,100b\n"
18290+ " .long 10b,100b\n"
18291+ " .long 11b,100b\n"
18292+ " .long 12b,100b\n"
18293+ " .long 13b,100b\n"
18294+ " .long 14b,100b\n"
18295+ " .long 15b,100b\n"
18296+ " .long 16b,100b\n"
18297+ " .long 17b,100b\n"
18298+ " .long 18b,100b\n"
18299+ " .long 19b,100b\n"
18300+ " .long 20b,100b\n"
18301+ " .long 21b,100b\n"
18302+ " .long 22b,100b\n"
18303+ " .long 23b,100b\n"
18304+ " .long 24b,100b\n"
18305+ " .long 25b,100b\n"
18306+ " .long 26b,100b\n"
18307+ " .long 27b,100b\n"
18308+ " .long 28b,100b\n"
18309+ " .long 29b,100b\n"
18310+ " .long 30b,100b\n"
18311+ " .long 31b,100b\n"
18312+ " .long 32b,100b\n"
18313+ " .long 33b,100b\n"
18314+ " .long 34b,100b\n"
18315+ " .long 35b,100b\n"
18316+ " .long 36b,100b\n"
18317+ " .long 37b,100b\n"
18318+ " .long 99b,101b\n"
18319+ ".previous"
18320+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
18321+ : "1"(to), "2"(from), "0"(size)
18322+ : "eax", "edx", "memory");
18323+ return size;
18324+}
18325+
18326+static unsigned long
18327+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18328+{
18329+ int d0, d1;
18330+ __asm__ __volatile__(
18331+ " .align 2,0x90\n"
18332+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18333+ " cmpl $67, %0\n"
18334+ " jbe 3f\n"
18335+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18336+ " .align 2,0x90\n"
18337+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18338+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18339+ "5: movl %%eax, 0(%3)\n"
18340+ "6: movl %%edx, 4(%3)\n"
18341+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18342+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18343+ "9: movl %%eax, 8(%3)\n"
18344+ "10: movl %%edx, 12(%3)\n"
18345+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18346+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18347+ "13: movl %%eax, 16(%3)\n"
18348+ "14: movl %%edx, 20(%3)\n"
18349+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18350+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18351+ "17: movl %%eax, 24(%3)\n"
18352+ "18: movl %%edx, 28(%3)\n"
18353+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18354+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18355+ "21: movl %%eax, 32(%3)\n"
18356+ "22: movl %%edx, 36(%3)\n"
18357+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18358+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18359+ "25: movl %%eax, 40(%3)\n"
18360+ "26: movl %%edx, 44(%3)\n"
18361+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18362+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18363+ "29: movl %%eax, 48(%3)\n"
18364+ "30: movl %%edx, 52(%3)\n"
18365+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18366+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18367+ "33: movl %%eax, 56(%3)\n"
18368+ "34: movl %%edx, 60(%3)\n"
18369+ " addl $-64, %0\n"
18370+ " addl $64, %4\n"
18371+ " addl $64, %3\n"
18372+ " cmpl $63, %0\n"
18373+ " ja 1b\n"
18374+ "35: movl %0, %%eax\n"
18375+ " shrl $2, %0\n"
18376+ " andl $3, %%eax\n"
18377+ " cld\n"
18378+ "99: rep; "__copyuser_seg" movsl\n"
18379+ "36: movl %%eax, %0\n"
18380+ "37: rep; "__copyuser_seg" movsb\n"
18381+ "100:\n"
18382 ".section .fixup,\"ax\"\n"
18383 "101: lea 0(%%eax,%0,4),%0\n"
18384 " jmp 100b\n"
18385@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18386 int d0, d1;
18387 __asm__ __volatile__(
18388 " .align 2,0x90\n"
18389- "0: movl 32(%4), %%eax\n"
18390+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18391 " cmpl $67, %0\n"
18392 " jbe 2f\n"
18393- "1: movl 64(%4), %%eax\n"
18394+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18395 " .align 2,0x90\n"
18396- "2: movl 0(%4), %%eax\n"
18397- "21: movl 4(%4), %%edx\n"
18398+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18399+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18400 " movl %%eax, 0(%3)\n"
18401 " movl %%edx, 4(%3)\n"
18402- "3: movl 8(%4), %%eax\n"
18403- "31: movl 12(%4),%%edx\n"
18404+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18405+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18406 " movl %%eax, 8(%3)\n"
18407 " movl %%edx, 12(%3)\n"
18408- "4: movl 16(%4), %%eax\n"
18409- "41: movl 20(%4), %%edx\n"
18410+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18411+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18412 " movl %%eax, 16(%3)\n"
18413 " movl %%edx, 20(%3)\n"
18414- "10: movl 24(%4), %%eax\n"
18415- "51: movl 28(%4), %%edx\n"
18416+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18417+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18418 " movl %%eax, 24(%3)\n"
18419 " movl %%edx, 28(%3)\n"
18420- "11: movl 32(%4), %%eax\n"
18421- "61: movl 36(%4), %%edx\n"
18422+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18423+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18424 " movl %%eax, 32(%3)\n"
18425 " movl %%edx, 36(%3)\n"
18426- "12: movl 40(%4), %%eax\n"
18427- "71: movl 44(%4), %%edx\n"
18428+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18429+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18430 " movl %%eax, 40(%3)\n"
18431 " movl %%edx, 44(%3)\n"
18432- "13: movl 48(%4), %%eax\n"
18433- "81: movl 52(%4), %%edx\n"
18434+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18435+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18436 " movl %%eax, 48(%3)\n"
18437 " movl %%edx, 52(%3)\n"
18438- "14: movl 56(%4), %%eax\n"
18439- "91: movl 60(%4), %%edx\n"
18440+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18441+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18442 " movl %%eax, 56(%3)\n"
18443 " movl %%edx, 60(%3)\n"
18444 " addl $-64, %0\n"
18445@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18446 " shrl $2, %0\n"
18447 " andl $3, %%eax\n"
18448 " cld\n"
18449- "6: rep; movsl\n"
18450+ "6: rep; "__copyuser_seg" movsl\n"
18451 " movl %%eax,%0\n"
18452- "7: rep; movsb\n"
18453+ "7: rep; "__copyuser_seg" movsb\n"
18454 "8:\n"
18455 ".section .fixup,\"ax\"\n"
18456 "9: lea 0(%%eax,%0,4),%0\n"
18457@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18458
18459 __asm__ __volatile__(
18460 " .align 2,0x90\n"
18461- "0: movl 32(%4), %%eax\n"
18462+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18463 " cmpl $67, %0\n"
18464 " jbe 2f\n"
18465- "1: movl 64(%4), %%eax\n"
18466+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18467 " .align 2,0x90\n"
18468- "2: movl 0(%4), %%eax\n"
18469- "21: movl 4(%4), %%edx\n"
18470+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18471+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18472 " movnti %%eax, 0(%3)\n"
18473 " movnti %%edx, 4(%3)\n"
18474- "3: movl 8(%4), %%eax\n"
18475- "31: movl 12(%4),%%edx\n"
18476+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18477+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18478 " movnti %%eax, 8(%3)\n"
18479 " movnti %%edx, 12(%3)\n"
18480- "4: movl 16(%4), %%eax\n"
18481- "41: movl 20(%4), %%edx\n"
18482+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18483+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18484 " movnti %%eax, 16(%3)\n"
18485 " movnti %%edx, 20(%3)\n"
18486- "10: movl 24(%4), %%eax\n"
18487- "51: movl 28(%4), %%edx\n"
18488+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18489+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18490 " movnti %%eax, 24(%3)\n"
18491 " movnti %%edx, 28(%3)\n"
18492- "11: movl 32(%4), %%eax\n"
18493- "61: movl 36(%4), %%edx\n"
18494+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18495+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18496 " movnti %%eax, 32(%3)\n"
18497 " movnti %%edx, 36(%3)\n"
18498- "12: movl 40(%4), %%eax\n"
18499- "71: movl 44(%4), %%edx\n"
18500+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18501+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18502 " movnti %%eax, 40(%3)\n"
18503 " movnti %%edx, 44(%3)\n"
18504- "13: movl 48(%4), %%eax\n"
18505- "81: movl 52(%4), %%edx\n"
18506+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18507+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18508 " movnti %%eax, 48(%3)\n"
18509 " movnti %%edx, 52(%3)\n"
18510- "14: movl 56(%4), %%eax\n"
18511- "91: movl 60(%4), %%edx\n"
18512+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18513+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18514 " movnti %%eax, 56(%3)\n"
18515 " movnti %%edx, 60(%3)\n"
18516 " addl $-64, %0\n"
18517@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18518 " shrl $2, %0\n"
18519 " andl $3, %%eax\n"
18520 " cld\n"
18521- "6: rep; movsl\n"
18522+ "6: rep; "__copyuser_seg" movsl\n"
18523 " movl %%eax,%0\n"
18524- "7: rep; movsb\n"
18525+ "7: rep; "__copyuser_seg" movsb\n"
18526 "8:\n"
18527 ".section .fixup,\"ax\"\n"
18528 "9: lea 0(%%eax,%0,4),%0\n"
18529@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18530
18531 __asm__ __volatile__(
18532 " .align 2,0x90\n"
18533- "0: movl 32(%4), %%eax\n"
18534+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18535 " cmpl $67, %0\n"
18536 " jbe 2f\n"
18537- "1: movl 64(%4), %%eax\n"
18538+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18539 " .align 2,0x90\n"
18540- "2: movl 0(%4), %%eax\n"
18541- "21: movl 4(%4), %%edx\n"
18542+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18543+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18544 " movnti %%eax, 0(%3)\n"
18545 " movnti %%edx, 4(%3)\n"
18546- "3: movl 8(%4), %%eax\n"
18547- "31: movl 12(%4),%%edx\n"
18548+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18549+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18550 " movnti %%eax, 8(%3)\n"
18551 " movnti %%edx, 12(%3)\n"
18552- "4: movl 16(%4), %%eax\n"
18553- "41: movl 20(%4), %%edx\n"
18554+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18555+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18556 " movnti %%eax, 16(%3)\n"
18557 " movnti %%edx, 20(%3)\n"
18558- "10: movl 24(%4), %%eax\n"
18559- "51: movl 28(%4), %%edx\n"
18560+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18561+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18562 " movnti %%eax, 24(%3)\n"
18563 " movnti %%edx, 28(%3)\n"
18564- "11: movl 32(%4), %%eax\n"
18565- "61: movl 36(%4), %%edx\n"
18566+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18567+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18568 " movnti %%eax, 32(%3)\n"
18569 " movnti %%edx, 36(%3)\n"
18570- "12: movl 40(%4), %%eax\n"
18571- "71: movl 44(%4), %%edx\n"
18572+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18573+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18574 " movnti %%eax, 40(%3)\n"
18575 " movnti %%edx, 44(%3)\n"
18576- "13: movl 48(%4), %%eax\n"
18577- "81: movl 52(%4), %%edx\n"
18578+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18579+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18580 " movnti %%eax, 48(%3)\n"
18581 " movnti %%edx, 52(%3)\n"
18582- "14: movl 56(%4), %%eax\n"
18583- "91: movl 60(%4), %%edx\n"
18584+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18585+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18586 " movnti %%eax, 56(%3)\n"
18587 " movnti %%edx, 60(%3)\n"
18588 " addl $-64, %0\n"
18589@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18590 " shrl $2, %0\n"
18591 " andl $3, %%eax\n"
18592 " cld\n"
18593- "6: rep; movsl\n"
18594+ "6: rep; "__copyuser_seg" movsl\n"
18595 " movl %%eax,%0\n"
18596- "7: rep; movsb\n"
18597+ "7: rep; "__copyuser_seg" movsb\n"
18598 "8:\n"
18599 ".section .fixup,\"ax\"\n"
18600 "9: lea 0(%%eax,%0,4),%0\n"
18601@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18602 */
18603 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18604 unsigned long size);
18605-unsigned long __copy_user_intel(void __user *to, const void *from,
18606+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18607+ unsigned long size);
18608+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18609 unsigned long size);
18610 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18611 const void __user *from, unsigned long size);
18612 #endif /* CONFIG_X86_INTEL_USERCOPY */
18613
18614 /* Generic arbitrary sized copy. */
18615-#define __copy_user(to, from, size) \
18616+#define __copy_user(to, from, size, prefix, set, restore) \
18617 do { \
18618 int __d0, __d1, __d2; \
18619 __asm__ __volatile__( \
18620+ set \
18621 " cmp $7,%0\n" \
18622 " jbe 1f\n" \
18623 " movl %1,%0\n" \
18624 " negl %0\n" \
18625 " andl $7,%0\n" \
18626 " subl %0,%3\n" \
18627- "4: rep; movsb\n" \
18628+ "4: rep; "prefix"movsb\n" \
18629 " movl %3,%0\n" \
18630 " shrl $2,%0\n" \
18631 " andl $3,%3\n" \
18632 " .align 2,0x90\n" \
18633- "0: rep; movsl\n" \
18634+ "0: rep; "prefix"movsl\n" \
18635 " movl %3,%0\n" \
18636- "1: rep; movsb\n" \
18637+ "1: rep; "prefix"movsb\n" \
18638 "2:\n" \
18639+ restore \
18640 ".section .fixup,\"ax\"\n" \
18641 "5: addl %3,%0\n" \
18642 " jmp 2b\n" \
18643@@ -682,14 +799,14 @@ do { \
18644 " negl %0\n" \
18645 " andl $7,%0\n" \
18646 " subl %0,%3\n" \
18647- "4: rep; movsb\n" \
18648+ "4: rep; "__copyuser_seg"movsb\n" \
18649 " movl %3,%0\n" \
18650 " shrl $2,%0\n" \
18651 " andl $3,%3\n" \
18652 " .align 2,0x90\n" \
18653- "0: rep; movsl\n" \
18654+ "0: rep; "__copyuser_seg"movsl\n" \
18655 " movl %3,%0\n" \
18656- "1: rep; movsb\n" \
18657+ "1: rep; "__copyuser_seg"movsb\n" \
18658 "2:\n" \
18659 ".section .fixup,\"ax\"\n" \
18660 "5: addl %3,%0\n" \
18661@@ -775,9 +892,9 @@ survive:
18662 }
18663 #endif
18664 if (movsl_is_ok(to, from, n))
18665- __copy_user(to, from, n);
18666+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18667 else
18668- n = __copy_user_intel(to, from, n);
18669+ n = __generic_copy_to_user_intel(to, from, n);
18670 return n;
18671 }
18672 EXPORT_SYMBOL(__copy_to_user_ll);
18673@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18674 unsigned long n)
18675 {
18676 if (movsl_is_ok(to, from, n))
18677- __copy_user(to, from, n);
18678+ __copy_user(to, from, n, __copyuser_seg, "", "");
18679 else
18680- n = __copy_user_intel((void __user *)to,
18681- (const void *)from, n);
18682+ n = __generic_copy_from_user_intel(to, from, n);
18683 return n;
18684 }
18685 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18686@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18687 if (n > 64 && cpu_has_xmm2)
18688 n = __copy_user_intel_nocache(to, from, n);
18689 else
18690- __copy_user(to, from, n);
18691+ __copy_user(to, from, n, __copyuser_seg, "", "");
18692 #else
18693- __copy_user(to, from, n);
18694+ __copy_user(to, from, n, __copyuser_seg, "", "");
18695 #endif
18696 return n;
18697 }
18698 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18699
18700-/**
18701- * copy_to_user: - Copy a block of data into user space.
18702- * @to: Destination address, in user space.
18703- * @from: Source address, in kernel space.
18704- * @n: Number of bytes to copy.
18705- *
18706- * Context: User context only. This function may sleep.
18707- *
18708- * Copy data from kernel space to user space.
18709- *
18710- * Returns number of bytes that could not be copied.
18711- * On success, this will be zero.
18712- */
18713-unsigned long
18714-copy_to_user(void __user *to, const void *from, unsigned long n)
18715+void copy_from_user_overflow(void)
18716 {
18717- if (access_ok(VERIFY_WRITE, to, n))
18718- n = __copy_to_user(to, from, n);
18719- return n;
18720+ WARN(1, "Buffer overflow detected!\n");
18721 }
18722-EXPORT_SYMBOL(copy_to_user);
18723+EXPORT_SYMBOL(copy_from_user_overflow);
18724
18725-/**
18726- * copy_from_user: - Copy a block of data from user space.
18727- * @to: Destination address, in kernel space.
18728- * @from: Source address, in user space.
18729- * @n: Number of bytes to copy.
18730- *
18731- * Context: User context only. This function may sleep.
18732- *
18733- * Copy data from user space to kernel space.
18734- *
18735- * Returns number of bytes that could not be copied.
18736- * On success, this will be zero.
18737- *
18738- * If some data could not be copied, this function will pad the copied
18739- * data to the requested size using zero bytes.
18740- */
18741-unsigned long
18742-_copy_from_user(void *to, const void __user *from, unsigned long n)
18743+void copy_to_user_overflow(void)
18744 {
18745- if (access_ok(VERIFY_READ, from, n))
18746- n = __copy_from_user(to, from, n);
18747- else
18748- memset(to, 0, n);
18749- return n;
18750+ WARN(1, "Buffer overflow detected!\n");
18751 }
18752-EXPORT_SYMBOL(_copy_from_user);
18753+EXPORT_SYMBOL(copy_to_user_overflow);
18754
18755-void copy_from_user_overflow(void)
18756+#ifdef CONFIG_PAX_MEMORY_UDEREF
18757+void __set_fs(mm_segment_t x)
18758 {
18759- WARN(1, "Buffer overflow detected!\n");
18760+ switch (x.seg) {
18761+ case 0:
18762+ loadsegment(gs, 0);
18763+ break;
18764+ case TASK_SIZE_MAX:
18765+ loadsegment(gs, __USER_DS);
18766+ break;
18767+ case -1UL:
18768+ loadsegment(gs, __KERNEL_DS);
18769+ break;
18770+ default:
18771+ BUG();
18772+ }
18773+ return;
18774 }
18775-EXPORT_SYMBOL(copy_from_user_overflow);
18776+EXPORT_SYMBOL(__set_fs);
18777+
18778+void set_fs(mm_segment_t x)
18779+{
18780+ current_thread_info()->addr_limit = x;
18781+ __set_fs(x);
18782+}
18783+EXPORT_SYMBOL(set_fs);
18784+#endif
18785diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18786--- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18787+++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18788@@ -42,6 +42,12 @@ long
18789 __strncpy_from_user(char *dst, const char __user *src, long count)
18790 {
18791 long res;
18792+
18793+#ifdef CONFIG_PAX_MEMORY_UDEREF
18794+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18795+ src += PAX_USER_SHADOW_BASE;
18796+#endif
18797+
18798 __do_strncpy_from_user(dst, src, count, res);
18799 return res;
18800 }
18801@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18802 {
18803 long __d0;
18804 might_fault();
18805+
18806+#ifdef CONFIG_PAX_MEMORY_UDEREF
18807+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18808+ addr += PAX_USER_SHADOW_BASE;
18809+#endif
18810+
18811 /* no memory constraint because it doesn't change any memory gcc knows
18812 about */
18813 asm volatile(
18814@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18815
18816 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18817 {
18818- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18819+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18820+
18821+#ifdef CONFIG_PAX_MEMORY_UDEREF
18822+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18823+ to += PAX_USER_SHADOW_BASE;
18824+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18825+ from += PAX_USER_SHADOW_BASE;
18826+#endif
18827+
18828 return copy_user_generic((__force void *)to, (__force void *)from, len);
18829- }
18830- return len;
18831+ }
18832+ return len;
18833 }
18834 EXPORT_SYMBOL(copy_in_user);
18835
18836diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18837--- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18838+++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18839@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18840 else
18841 BITS := 64
18842 UTS_MACHINE := x86_64
18843+ biarch := $(call cc-option,-m64)
18844 CHECKFLAGS += -D__x86_64__ -m64
18845
18846 KBUILD_AFLAGS += -m64
18847@@ -195,3 +196,12 @@ define archhelp
18848 echo ' FDARGS="..." arguments for the booted kernel'
18849 echo ' FDINITRD=file initrd for the booted kernel'
18850 endef
18851+
18852+define OLD_LD
18853+
18854+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18855+*** Please upgrade your binutils to 2.18 or newer
18856+endef
18857+
18858+archprepare:
18859+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18860diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18861--- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18862+++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18863@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18864 const struct exception_table_entry *fixup;
18865
18866 #ifdef CONFIG_PNPBIOS
18867- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18868+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18869 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18870 extern u32 pnp_bios_is_utter_crap;
18871 pnp_bios_is_utter_crap = 1;
18872diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18873--- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18874+++ linux-3.0.4/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18875@@ -13,10 +13,18 @@
18876 #include <linux/perf_event.h> /* perf_sw_event */
18877 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18878 #include <linux/prefetch.h> /* prefetchw */
18879+#include <linux/unistd.h>
18880+#include <linux/compiler.h>
18881
18882 #include <asm/traps.h> /* dotraplinkage, ... */
18883 #include <asm/pgalloc.h> /* pgd_*(), ... */
18884 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18885+#include <asm/vsyscall.h>
18886+#include <asm/tlbflush.h>
18887+
18888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18889+#include <asm/stacktrace.h>
18890+#endif
18891
18892 /*
18893 * Page fault error code bits:
18894@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18895 int ret = 0;
18896
18897 /* kprobe_running() needs smp_processor_id() */
18898- if (kprobes_built_in() && !user_mode_vm(regs)) {
18899+ if (kprobes_built_in() && !user_mode(regs)) {
18900 preempt_disable();
18901 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18902 ret = 1;
18903@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18904 return !instr_lo || (instr_lo>>1) == 1;
18905 case 0x00:
18906 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18907- if (probe_kernel_address(instr, opcode))
18908+ if (user_mode(regs)) {
18909+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18910+ return 0;
18911+ } else if (probe_kernel_address(instr, opcode))
18912 return 0;
18913
18914 *prefetch = (instr_lo == 0xF) &&
18915@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18916 while (instr < max_instr) {
18917 unsigned char opcode;
18918
18919- if (probe_kernel_address(instr, opcode))
18920+ if (user_mode(regs)) {
18921+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18922+ break;
18923+ } else if (probe_kernel_address(instr, opcode))
18924 break;
18925
18926 instr++;
18927@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18928 force_sig_info(si_signo, &info, tsk);
18929 }
18930
18931+#ifdef CONFIG_PAX_EMUTRAMP
18932+static int pax_handle_fetch_fault(struct pt_regs *regs);
18933+#endif
18934+
18935+#ifdef CONFIG_PAX_PAGEEXEC
18936+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18937+{
18938+ pgd_t *pgd;
18939+ pud_t *pud;
18940+ pmd_t *pmd;
18941+
18942+ pgd = pgd_offset(mm, address);
18943+ if (!pgd_present(*pgd))
18944+ return NULL;
18945+ pud = pud_offset(pgd, address);
18946+ if (!pud_present(*pud))
18947+ return NULL;
18948+ pmd = pmd_offset(pud, address);
18949+ if (!pmd_present(*pmd))
18950+ return NULL;
18951+ return pmd;
18952+}
18953+#endif
18954+
18955 DEFINE_SPINLOCK(pgd_lock);
18956 LIST_HEAD(pgd_list);
18957
18958@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18959 for (address = VMALLOC_START & PMD_MASK;
18960 address >= TASK_SIZE && address < FIXADDR_TOP;
18961 address += PMD_SIZE) {
18962+
18963+#ifdef CONFIG_PAX_PER_CPU_PGD
18964+ unsigned long cpu;
18965+#else
18966 struct page *page;
18967+#endif
18968
18969 spin_lock(&pgd_lock);
18970+
18971+#ifdef CONFIG_PAX_PER_CPU_PGD
18972+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18973+ pgd_t *pgd = get_cpu_pgd(cpu);
18974+ pmd_t *ret;
18975+#else
18976 list_for_each_entry(page, &pgd_list, lru) {
18977+ pgd_t *pgd = page_address(page);
18978 spinlock_t *pgt_lock;
18979 pmd_t *ret;
18980
18981@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18982 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18983
18984 spin_lock(pgt_lock);
18985- ret = vmalloc_sync_one(page_address(page), address);
18986+#endif
18987+
18988+ ret = vmalloc_sync_one(pgd, address);
18989+
18990+#ifndef CONFIG_PAX_PER_CPU_PGD
18991 spin_unlock(pgt_lock);
18992+#endif
18993
18994 if (!ret)
18995 break;
18996@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18997 * an interrupt in the middle of a task switch..
18998 */
18999 pgd_paddr = read_cr3();
19000+
19001+#ifdef CONFIG_PAX_PER_CPU_PGD
19002+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19003+#endif
19004+
19005 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19006 if (!pmd_k)
19007 return -1;
19008@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19009 * happen within a race in page table update. In the later
19010 * case just flush:
19011 */
19012+
19013+#ifdef CONFIG_PAX_PER_CPU_PGD
19014+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19015+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19016+#else
19017 pgd = pgd_offset(current->active_mm, address);
19018+#endif
19019+
19020 pgd_ref = pgd_offset_k(address);
19021 if (pgd_none(*pgd_ref))
19022 return -1;
19023@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19024 static int is_errata100(struct pt_regs *regs, unsigned long address)
19025 {
19026 #ifdef CONFIG_X86_64
19027- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19028+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19029 return 1;
19030 #endif
19031 return 0;
19032@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19033 }
19034
19035 static const char nx_warning[] = KERN_CRIT
19036-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19037+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19038
19039 static void
19040 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19041@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19042 if (!oops_may_print())
19043 return;
19044
19045- if (error_code & PF_INSTR) {
19046+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19047 unsigned int level;
19048
19049 pte_t *pte = lookup_address(address, &level);
19050
19051 if (pte && pte_present(*pte) && !pte_exec(*pte))
19052- printk(nx_warning, current_uid());
19053+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19054+ }
19055+
19056+#ifdef CONFIG_PAX_KERNEXEC
19057+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19058+ if (current->signal->curr_ip)
19059+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19060+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19061+ else
19062+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19063+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19064 }
19065+#endif
19066
19067 printk(KERN_ALERT "BUG: unable to handle kernel ");
19068 if (address < PAGE_SIZE)
19069@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19070 unsigned long address, int si_code)
19071 {
19072 struct task_struct *tsk = current;
19073+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19074+ struct mm_struct *mm = tsk->mm;
19075+#endif
19076+
19077+#ifdef CONFIG_X86_64
19078+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19079+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19080+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19081+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19082+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19083+ return;
19084+ }
19085+ }
19086+#endif
19087+
19088+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19089+ if (mm && (error_code & PF_USER)) {
19090+ unsigned long ip = regs->ip;
19091+
19092+ if (v8086_mode(regs))
19093+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19094+
19095+ /*
19096+ * It's possible to have interrupts off here:
19097+ */
19098+ local_irq_enable();
19099+
19100+#ifdef CONFIG_PAX_PAGEEXEC
19101+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19102+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19103+
19104+#ifdef CONFIG_PAX_EMUTRAMP
19105+ switch (pax_handle_fetch_fault(regs)) {
19106+ case 2:
19107+ return;
19108+ }
19109+#endif
19110+
19111+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19112+ do_group_exit(SIGKILL);
19113+ }
19114+#endif
19115+
19116+#ifdef CONFIG_PAX_SEGMEXEC
19117+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19118+
19119+#ifdef CONFIG_PAX_EMUTRAMP
19120+ switch (pax_handle_fetch_fault(regs)) {
19121+ case 2:
19122+ return;
19123+ }
19124+#endif
19125+
19126+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19127+ do_group_exit(SIGKILL);
19128+ }
19129+#endif
19130+
19131+ }
19132+#endif
19133
19134 /* User mode accesses just cause a SIGSEGV */
19135 if (error_code & PF_USER) {
19136@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19137 return 1;
19138 }
19139
19140+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19141+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19142+{
19143+ pte_t *pte;
19144+ pmd_t *pmd;
19145+ spinlock_t *ptl;
19146+ unsigned char pte_mask;
19147+
19148+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19149+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19150+ return 0;
19151+
19152+ /* PaX: it's our fault, let's handle it if we can */
19153+
19154+ /* PaX: take a look at read faults before acquiring any locks */
19155+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19156+ /* instruction fetch attempt from a protected page in user mode */
19157+ up_read(&mm->mmap_sem);
19158+
19159+#ifdef CONFIG_PAX_EMUTRAMP
19160+ switch (pax_handle_fetch_fault(regs)) {
19161+ case 2:
19162+ return 1;
19163+ }
19164+#endif
19165+
19166+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19167+ do_group_exit(SIGKILL);
19168+ }
19169+
19170+ pmd = pax_get_pmd(mm, address);
19171+ if (unlikely(!pmd))
19172+ return 0;
19173+
19174+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19175+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19176+ pte_unmap_unlock(pte, ptl);
19177+ return 0;
19178+ }
19179+
19180+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19181+ /* write attempt to a protected page in user mode */
19182+ pte_unmap_unlock(pte, ptl);
19183+ return 0;
19184+ }
19185+
19186+#ifdef CONFIG_SMP
19187+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19188+#else
19189+ if (likely(address > get_limit(regs->cs)))
19190+#endif
19191+ {
19192+ set_pte(pte, pte_mkread(*pte));
19193+ __flush_tlb_one(address);
19194+ pte_unmap_unlock(pte, ptl);
19195+ up_read(&mm->mmap_sem);
19196+ return 1;
19197+ }
19198+
19199+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19200+
19201+ /*
19202+ * PaX: fill DTLB with user rights and retry
19203+ */
19204+ __asm__ __volatile__ (
19205+ "orb %2,(%1)\n"
19206+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19207+/*
19208+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19209+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19210+ * page fault when examined during a TLB load attempt. this is true not only
19211+ * for PTEs holding a non-present entry but also present entries that will
19212+ * raise a page fault (such as those set up by PaX, or the copy-on-write
19213+ * mechanism). in effect it means that we do *not* need to flush the TLBs
19214+ * for our target pages since their PTEs are simply not in the TLBs at all.
19215+
19216+ * the best thing in omitting it is that we gain around 15-20% speed in the
19217+ * fast path of the page fault handler and can get rid of tracing since we
19218+ * can no longer flush unintended entries.
19219+ */
19220+ "invlpg (%0)\n"
19221+#endif
19222+ __copyuser_seg"testb $0,(%0)\n"
19223+ "xorb %3,(%1)\n"
19224+ :
19225+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19226+ : "memory", "cc");
19227+ pte_unmap_unlock(pte, ptl);
19228+ up_read(&mm->mmap_sem);
19229+ return 1;
19230+}
19231+#endif
19232+
19233 /*
19234 * Handle a spurious fault caused by a stale TLB entry.
19235 *
19236@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19237 static inline int
19238 access_error(unsigned long error_code, struct vm_area_struct *vma)
19239 {
19240+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19241+ return 1;
19242+
19243 if (error_code & PF_WRITE) {
19244 /* write, present and write, not present: */
19245 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19246@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19247 {
19248 struct vm_area_struct *vma;
19249 struct task_struct *tsk;
19250- unsigned long address;
19251 struct mm_struct *mm;
19252 int fault;
19253 int write = error_code & PF_WRITE;
19254 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19255 (write ? FAULT_FLAG_WRITE : 0);
19256
19257+ /* Get the faulting address: */
19258+ unsigned long address = read_cr2();
19259+
19260+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19261+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19262+ if (!search_exception_tables(regs->ip)) {
19263+ bad_area_nosemaphore(regs, error_code, address);
19264+ return;
19265+ }
19266+ if (address < PAX_USER_SHADOW_BASE) {
19267+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19268+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19269+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19270+ } else
19271+ address -= PAX_USER_SHADOW_BASE;
19272+ }
19273+#endif
19274+
19275 tsk = current;
19276 mm = tsk->mm;
19277
19278- /* Get the faulting address: */
19279- address = read_cr2();
19280-
19281 /*
19282 * Detect and handle instructions that would cause a page fault for
19283 * both a tracked kernel page and a userspace page.
19284@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19285 * User-mode registers count as a user access even for any
19286 * potential system fault or CPU buglet:
19287 */
19288- if (user_mode_vm(regs)) {
19289+ if (user_mode(regs)) {
19290 local_irq_enable();
19291 error_code |= PF_USER;
19292 } else {
19293@@ -1103,6 +1351,11 @@ retry:
19294 might_sleep();
19295 }
19296
19297+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19298+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19299+ return;
19300+#endif
19301+
19302 vma = find_vma(mm, address);
19303 if (unlikely(!vma)) {
19304 bad_area(regs, error_code, address);
19305@@ -1114,18 +1367,24 @@ retry:
19306 bad_area(regs, error_code, address);
19307 return;
19308 }
19309- if (error_code & PF_USER) {
19310- /*
19311- * Accessing the stack below %sp is always a bug.
19312- * The large cushion allows instructions like enter
19313- * and pusha to work. ("enter $65535, $31" pushes
19314- * 32 pointers and then decrements %sp by 65535.)
19315- */
19316- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19317- bad_area(regs, error_code, address);
19318- return;
19319- }
19320+ /*
19321+ * Accessing the stack below %sp is always a bug.
19322+ * The large cushion allows instructions like enter
19323+ * and pusha to work. ("enter $65535, $31" pushes
19324+ * 32 pointers and then decrements %sp by 65535.)
19325+ */
19326+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19327+ bad_area(regs, error_code, address);
19328+ return;
19329 }
19330+
19331+#ifdef CONFIG_PAX_SEGMEXEC
19332+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19333+ bad_area(regs, error_code, address);
19334+ return;
19335+ }
19336+#endif
19337+
19338 if (unlikely(expand_stack(vma, address))) {
19339 bad_area(regs, error_code, address);
19340 return;
19341@@ -1180,3 +1439,199 @@ good_area:
19342
19343 up_read(&mm->mmap_sem);
19344 }
19345+
19346+#ifdef CONFIG_PAX_EMUTRAMP
19347+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19348+{
19349+ int err;
19350+
19351+ do { /* PaX: gcc trampoline emulation #1 */
19352+ unsigned char mov1, mov2;
19353+ unsigned short jmp;
19354+ unsigned int addr1, addr2;
19355+
19356+#ifdef CONFIG_X86_64
19357+ if ((regs->ip + 11) >> 32)
19358+ break;
19359+#endif
19360+
19361+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19362+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19363+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19364+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19365+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19366+
19367+ if (err)
19368+ break;
19369+
19370+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19371+ regs->cx = addr1;
19372+ regs->ax = addr2;
19373+ regs->ip = addr2;
19374+ return 2;
19375+ }
19376+ } while (0);
19377+
19378+ do { /* PaX: gcc trampoline emulation #2 */
19379+ unsigned char mov, jmp;
19380+ unsigned int addr1, addr2;
19381+
19382+#ifdef CONFIG_X86_64
19383+ if ((regs->ip + 9) >> 32)
19384+ break;
19385+#endif
19386+
19387+ err = get_user(mov, (unsigned char __user *)regs->ip);
19388+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19389+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19390+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19391+
19392+ if (err)
19393+ break;
19394+
19395+ if (mov == 0xB9 && jmp == 0xE9) {
19396+ regs->cx = addr1;
19397+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19398+ return 2;
19399+ }
19400+ } while (0);
19401+
19402+ return 1; /* PaX in action */
19403+}
19404+
19405+#ifdef CONFIG_X86_64
19406+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19407+{
19408+ int err;
19409+
19410+ do { /* PaX: gcc trampoline emulation #1 */
19411+ unsigned short mov1, mov2, jmp1;
19412+ unsigned char jmp2;
19413+ unsigned int addr1;
19414+ unsigned long addr2;
19415+
19416+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19417+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19418+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19419+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19420+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19421+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19422+
19423+ if (err)
19424+ break;
19425+
19426+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19427+ regs->r11 = addr1;
19428+ regs->r10 = addr2;
19429+ regs->ip = addr1;
19430+ return 2;
19431+ }
19432+ } while (0);
19433+
19434+ do { /* PaX: gcc trampoline emulation #2 */
19435+ unsigned short mov1, mov2, jmp1;
19436+ unsigned char jmp2;
19437+ unsigned long addr1, addr2;
19438+
19439+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19440+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19441+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19442+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19443+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19444+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19445+
19446+ if (err)
19447+ break;
19448+
19449+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19450+ regs->r11 = addr1;
19451+ regs->r10 = addr2;
19452+ regs->ip = addr1;
19453+ return 2;
19454+ }
19455+ } while (0);
19456+
19457+ return 1; /* PaX in action */
19458+}
19459+#endif
19460+
19461+/*
19462+ * PaX: decide what to do with offenders (regs->ip = fault address)
19463+ *
19464+ * returns 1 when task should be killed
19465+ * 2 when gcc trampoline was detected
19466+ */
19467+static int pax_handle_fetch_fault(struct pt_regs *regs)
19468+{
19469+ if (v8086_mode(regs))
19470+ return 1;
19471+
19472+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19473+ return 1;
19474+
19475+#ifdef CONFIG_X86_32
19476+ return pax_handle_fetch_fault_32(regs);
19477+#else
19478+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19479+ return pax_handle_fetch_fault_32(regs);
19480+ else
19481+ return pax_handle_fetch_fault_64(regs);
19482+#endif
19483+}
19484+#endif
19485+
19486+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19487+void pax_report_insns(void *pc, void *sp)
19488+{
19489+ long i;
19490+
19491+ printk(KERN_ERR "PAX: bytes at PC: ");
19492+ for (i = 0; i < 20; i++) {
19493+ unsigned char c;
19494+ if (get_user(c, (__force unsigned char __user *)pc+i))
19495+ printk(KERN_CONT "?? ");
19496+ else
19497+ printk(KERN_CONT "%02x ", c);
19498+ }
19499+ printk("\n");
19500+
19501+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19502+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19503+ unsigned long c;
19504+ if (get_user(c, (__force unsigned long __user *)sp+i))
19505+#ifdef CONFIG_X86_32
19506+ printk(KERN_CONT "???????? ");
19507+#else
19508+ printk(KERN_CONT "???????????????? ");
19509+#endif
19510+ else
19511+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19512+ }
19513+ printk("\n");
19514+}
19515+#endif
19516+
19517+/**
19518+ * probe_kernel_write(): safely attempt to write to a location
19519+ * @dst: address to write to
19520+ * @src: pointer to the data that shall be written
19521+ * @size: size of the data chunk
19522+ *
19523+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19524+ * happens, handle that and return -EFAULT.
19525+ */
19526+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19527+{
19528+ long ret;
19529+ mm_segment_t old_fs = get_fs();
19530+
19531+ set_fs(KERNEL_DS);
19532+ pagefault_disable();
19533+ pax_open_kernel();
19534+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19535+ pax_close_kernel();
19536+ pagefault_enable();
19537+ set_fs(old_fs);
19538+
19539+ return ret ? -EFAULT : 0;
19540+}
19541diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19542--- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19543+++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19544@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19545 addr = start;
19546 len = (unsigned long) nr_pages << PAGE_SHIFT;
19547 end = start + len;
19548- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19549+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19550 (void __user *)start, len)))
19551 return 0;
19552
19553diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19554--- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19555+++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19556@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19557 idx = type + KM_TYPE_NR*smp_processor_id();
19558 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19559 BUG_ON(!pte_none(*(kmap_pte-idx)));
19560+
19561+ pax_open_kernel();
19562 set_pte(kmap_pte-idx, mk_pte(page, prot));
19563+ pax_close_kernel();
19564
19565 return (void *)vaddr;
19566 }
19567diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19568--- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19569+++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19570@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19571 struct hstate *h = hstate_file(file);
19572 struct mm_struct *mm = current->mm;
19573 struct vm_area_struct *vma;
19574- unsigned long start_addr;
19575+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19576+
19577+#ifdef CONFIG_PAX_SEGMEXEC
19578+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19579+ pax_task_size = SEGMEXEC_TASK_SIZE;
19580+#endif
19581+
19582+ pax_task_size -= PAGE_SIZE;
19583
19584 if (len > mm->cached_hole_size) {
19585- start_addr = mm->free_area_cache;
19586+ start_addr = mm->free_area_cache;
19587 } else {
19588- start_addr = TASK_UNMAPPED_BASE;
19589- mm->cached_hole_size = 0;
19590+ start_addr = mm->mmap_base;
19591+ mm->cached_hole_size = 0;
19592 }
19593
19594 full_search:
19595@@ -280,26 +287,27 @@ full_search:
19596
19597 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19598 /* At this point: (!vma || addr < vma->vm_end). */
19599- if (TASK_SIZE - len < addr) {
19600+ if (pax_task_size - len < addr) {
19601 /*
19602 * Start a new search - just in case we missed
19603 * some holes.
19604 */
19605- if (start_addr != TASK_UNMAPPED_BASE) {
19606- start_addr = TASK_UNMAPPED_BASE;
19607+ if (start_addr != mm->mmap_base) {
19608+ start_addr = mm->mmap_base;
19609 mm->cached_hole_size = 0;
19610 goto full_search;
19611 }
19612 return -ENOMEM;
19613 }
19614- if (!vma || addr + len <= vma->vm_start) {
19615- mm->free_area_cache = addr + len;
19616- return addr;
19617- }
19618+ if (check_heap_stack_gap(vma, addr, len))
19619+ break;
19620 if (addr + mm->cached_hole_size < vma->vm_start)
19621 mm->cached_hole_size = vma->vm_start - addr;
19622 addr = ALIGN(vma->vm_end, huge_page_size(h));
19623 }
19624+
19625+ mm->free_area_cache = addr + len;
19626+ return addr;
19627 }
19628
19629 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19630@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19631 {
19632 struct hstate *h = hstate_file(file);
19633 struct mm_struct *mm = current->mm;
19634- struct vm_area_struct *vma, *prev_vma;
19635- unsigned long base = mm->mmap_base, addr = addr0;
19636+ struct vm_area_struct *vma;
19637+ unsigned long base = mm->mmap_base, addr;
19638 unsigned long largest_hole = mm->cached_hole_size;
19639- int first_time = 1;
19640
19641 /* don't allow allocations above current base */
19642 if (mm->free_area_cache > base)
19643@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19644 largest_hole = 0;
19645 mm->free_area_cache = base;
19646 }
19647-try_again:
19648+
19649 /* make sure it can fit in the remaining address space */
19650 if (mm->free_area_cache < len)
19651 goto fail;
19652
19653 /* either no address requested or can't fit in requested address hole */
19654- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19655+ addr = (mm->free_area_cache - len);
19656 do {
19657+ addr &= huge_page_mask(h);
19658+ vma = find_vma(mm, addr);
19659 /*
19660 * Lookup failure means no vma is above this address,
19661 * i.e. return with success:
19662- */
19663- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19664- return addr;
19665-
19666- /*
19667 * new region fits between prev_vma->vm_end and
19668 * vma->vm_start, use it:
19669 */
19670- if (addr + len <= vma->vm_start &&
19671- (!prev_vma || (addr >= prev_vma->vm_end))) {
19672+ if (check_heap_stack_gap(vma, addr, len)) {
19673 /* remember the address as a hint for next time */
19674- mm->cached_hole_size = largest_hole;
19675- return (mm->free_area_cache = addr);
19676- } else {
19677- /* pull free_area_cache down to the first hole */
19678- if (mm->free_area_cache == vma->vm_end) {
19679- mm->free_area_cache = vma->vm_start;
19680- mm->cached_hole_size = largest_hole;
19681- }
19682+ mm->cached_hole_size = largest_hole;
19683+ return (mm->free_area_cache = addr);
19684+ }
19685+ /* pull free_area_cache down to the first hole */
19686+ if (mm->free_area_cache == vma->vm_end) {
19687+ mm->free_area_cache = vma->vm_start;
19688+ mm->cached_hole_size = largest_hole;
19689 }
19690
19691 /* remember the largest hole we saw so far */
19692 if (addr + largest_hole < vma->vm_start)
19693- largest_hole = vma->vm_start - addr;
19694+ largest_hole = vma->vm_start - addr;
19695
19696 /* try just below the current vma->vm_start */
19697- addr = (vma->vm_start - len) & huge_page_mask(h);
19698- } while (len <= vma->vm_start);
19699+ addr = skip_heap_stack_gap(vma, len);
19700+ } while (!IS_ERR_VALUE(addr));
19701
19702 fail:
19703 /*
19704- * if hint left us with no space for the requested
19705- * mapping then try again:
19706- */
19707- if (first_time) {
19708- mm->free_area_cache = base;
19709- largest_hole = 0;
19710- first_time = 0;
19711- goto try_again;
19712- }
19713- /*
19714 * A failed mmap() very likely causes application failure,
19715 * so fall back to the bottom-up function here. This scenario
19716 * can happen with large stack limits and large mmap()
19717 * allocations.
19718 */
19719- mm->free_area_cache = TASK_UNMAPPED_BASE;
19720+
19721+#ifdef CONFIG_PAX_SEGMEXEC
19722+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19723+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19724+ else
19725+#endif
19726+
19727+ mm->mmap_base = TASK_UNMAPPED_BASE;
19728+
19729+#ifdef CONFIG_PAX_RANDMMAP
19730+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19731+ mm->mmap_base += mm->delta_mmap;
19732+#endif
19733+
19734+ mm->free_area_cache = mm->mmap_base;
19735 mm->cached_hole_size = ~0UL;
19736 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19737 len, pgoff, flags);
19738@@ -386,6 +392,7 @@ fail:
19739 /*
19740 * Restore the topdown base:
19741 */
19742+ mm->mmap_base = base;
19743 mm->free_area_cache = base;
19744 mm->cached_hole_size = ~0UL;
19745
19746@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19747 struct hstate *h = hstate_file(file);
19748 struct mm_struct *mm = current->mm;
19749 struct vm_area_struct *vma;
19750+ unsigned long pax_task_size = TASK_SIZE;
19751
19752 if (len & ~huge_page_mask(h))
19753 return -EINVAL;
19754- if (len > TASK_SIZE)
19755+
19756+#ifdef CONFIG_PAX_SEGMEXEC
19757+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19758+ pax_task_size = SEGMEXEC_TASK_SIZE;
19759+#endif
19760+
19761+ pax_task_size -= PAGE_SIZE;
19762+
19763+ if (len > pax_task_size)
19764 return -ENOMEM;
19765
19766 if (flags & MAP_FIXED) {
19767@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19768 if (addr) {
19769 addr = ALIGN(addr, huge_page_size(h));
19770 vma = find_vma(mm, addr);
19771- if (TASK_SIZE - len >= addr &&
19772- (!vma || addr + len <= vma->vm_start))
19773+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19774 return addr;
19775 }
19776 if (mm->get_unmapped_area == arch_get_unmapped_area)
19777diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19778--- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19779+++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19780@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19781 }
19782
19783 /*
19784- * Creates a middle page table and puts a pointer to it in the
19785- * given global directory entry. This only returns the gd entry
19786- * in non-PAE compilation mode, since the middle layer is folded.
19787- */
19788-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19789-{
19790- pud_t *pud;
19791- pmd_t *pmd_table;
19792-
19793-#ifdef CONFIG_X86_PAE
19794- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19795- if (after_bootmem)
19796- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19797- else
19798- pmd_table = (pmd_t *)alloc_low_page();
19799- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19800- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19801- pud = pud_offset(pgd, 0);
19802- BUG_ON(pmd_table != pmd_offset(pud, 0));
19803-
19804- return pmd_table;
19805- }
19806-#endif
19807- pud = pud_offset(pgd, 0);
19808- pmd_table = pmd_offset(pud, 0);
19809-
19810- return pmd_table;
19811-}
19812-
19813-/*
19814 * Create a page table and place a pointer to it in a middle page
19815 * directory entry:
19816 */
19817@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19818 page_table = (pte_t *)alloc_low_page();
19819
19820 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19821+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19822+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19823+#else
19824 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19825+#endif
19826 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19827 }
19828
19829 return pte_offset_kernel(pmd, 0);
19830 }
19831
19832+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19833+{
19834+ pud_t *pud;
19835+ pmd_t *pmd_table;
19836+
19837+ pud = pud_offset(pgd, 0);
19838+ pmd_table = pmd_offset(pud, 0);
19839+
19840+ return pmd_table;
19841+}
19842+
19843 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19844 {
19845 int pgd_idx = pgd_index(vaddr);
19846@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19847 int pgd_idx, pmd_idx;
19848 unsigned long vaddr;
19849 pgd_t *pgd;
19850+ pud_t *pud;
19851 pmd_t *pmd;
19852 pte_t *pte = NULL;
19853
19854@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19855 pgd = pgd_base + pgd_idx;
19856
19857 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19858- pmd = one_md_table_init(pgd);
19859- pmd = pmd + pmd_index(vaddr);
19860+ pud = pud_offset(pgd, vaddr);
19861+ pmd = pmd_offset(pud, vaddr);
19862+
19863+#ifdef CONFIG_X86_PAE
19864+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19865+#endif
19866+
19867 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19868 pmd++, pmd_idx++) {
19869 pte = page_table_kmap_check(one_page_table_init(pmd),
19870@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19871 }
19872 }
19873
19874-static inline int is_kernel_text(unsigned long addr)
19875+static inline int is_kernel_text(unsigned long start, unsigned long end)
19876 {
19877- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19878- return 1;
19879- return 0;
19880+ if ((start > ktla_ktva((unsigned long)_etext) ||
19881+ end <= ktla_ktva((unsigned long)_stext)) &&
19882+ (start > ktla_ktva((unsigned long)_einittext) ||
19883+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19884+
19885+#ifdef CONFIG_ACPI_SLEEP
19886+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19887+#endif
19888+
19889+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19890+ return 0;
19891+ return 1;
19892 }
19893
19894 /*
19895@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19896 unsigned long last_map_addr = end;
19897 unsigned long start_pfn, end_pfn;
19898 pgd_t *pgd_base = swapper_pg_dir;
19899- int pgd_idx, pmd_idx, pte_ofs;
19900+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19901 unsigned long pfn;
19902 pgd_t *pgd;
19903+ pud_t *pud;
19904 pmd_t *pmd;
19905 pte_t *pte;
19906 unsigned pages_2m, pages_4k;
19907@@ -281,8 +282,13 @@ repeat:
19908 pfn = start_pfn;
19909 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19910 pgd = pgd_base + pgd_idx;
19911- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19912- pmd = one_md_table_init(pgd);
19913+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19914+ pud = pud_offset(pgd, 0);
19915+ pmd = pmd_offset(pud, 0);
19916+
19917+#ifdef CONFIG_X86_PAE
19918+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19919+#endif
19920
19921 if (pfn >= end_pfn)
19922 continue;
19923@@ -294,14 +300,13 @@ repeat:
19924 #endif
19925 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19926 pmd++, pmd_idx++) {
19927- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19928+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19929
19930 /*
19931 * Map with big pages if possible, otherwise
19932 * create normal page tables:
19933 */
19934 if (use_pse) {
19935- unsigned int addr2;
19936 pgprot_t prot = PAGE_KERNEL_LARGE;
19937 /*
19938 * first pass will use the same initial
19939@@ -311,11 +316,7 @@ repeat:
19940 __pgprot(PTE_IDENT_ATTR |
19941 _PAGE_PSE);
19942
19943- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19944- PAGE_OFFSET + PAGE_SIZE-1;
19945-
19946- if (is_kernel_text(addr) ||
19947- is_kernel_text(addr2))
19948+ if (is_kernel_text(address, address + PMD_SIZE))
19949 prot = PAGE_KERNEL_LARGE_EXEC;
19950
19951 pages_2m++;
19952@@ -332,7 +333,7 @@ repeat:
19953 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19954 pte += pte_ofs;
19955 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19956- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19957+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19958 pgprot_t prot = PAGE_KERNEL;
19959 /*
19960 * first pass will use the same initial
19961@@ -340,7 +341,7 @@ repeat:
19962 */
19963 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19964
19965- if (is_kernel_text(addr))
19966+ if (is_kernel_text(address, address + PAGE_SIZE))
19967 prot = PAGE_KERNEL_EXEC;
19968
19969 pages_4k++;
19970@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19971
19972 pud = pud_offset(pgd, va);
19973 pmd = pmd_offset(pud, va);
19974- if (!pmd_present(*pmd))
19975+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19976 break;
19977
19978 pte = pte_offset_kernel(pmd, va);
19979@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19980
19981 static void __init pagetable_init(void)
19982 {
19983- pgd_t *pgd_base = swapper_pg_dir;
19984-
19985- permanent_kmaps_init(pgd_base);
19986+ permanent_kmaps_init(swapper_pg_dir);
19987 }
19988
19989-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19990+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19991 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19992
19993 /* user-defined highmem size */
19994@@ -757,6 +756,12 @@ void __init mem_init(void)
19995
19996 pci_iommu_alloc();
19997
19998+#ifdef CONFIG_PAX_PER_CPU_PGD
19999+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20000+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20001+ KERNEL_PGD_PTRS);
20002+#endif
20003+
20004 #ifdef CONFIG_FLATMEM
20005 BUG_ON(!mem_map);
20006 #endif
20007@@ -774,7 +779,7 @@ void __init mem_init(void)
20008 set_highmem_pages_init();
20009
20010 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20011- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20012+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20013 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20014
20015 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20016@@ -815,10 +820,10 @@ void __init mem_init(void)
20017 ((unsigned long)&__init_end -
20018 (unsigned long)&__init_begin) >> 10,
20019
20020- (unsigned long)&_etext, (unsigned long)&_edata,
20021- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20022+ (unsigned long)&_sdata, (unsigned long)&_edata,
20023+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20024
20025- (unsigned long)&_text, (unsigned long)&_etext,
20026+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20027 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20028
20029 /*
20030@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20031 if (!kernel_set_to_readonly)
20032 return;
20033
20034+ start = ktla_ktva(start);
20035 pr_debug("Set kernel text: %lx - %lx for read write\n",
20036 start, start+size);
20037
20038@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20039 if (!kernel_set_to_readonly)
20040 return;
20041
20042+ start = ktla_ktva(start);
20043 pr_debug("Set kernel text: %lx - %lx for read only\n",
20044 start, start+size);
20045
20046@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20047 unsigned long start = PFN_ALIGN(_text);
20048 unsigned long size = PFN_ALIGN(_etext) - start;
20049
20050+ start = ktla_ktva(start);
20051 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20052 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20053 size >> 10);
20054diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
20055--- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20056+++ linux-3.0.4/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
20057@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20058 * around without checking the pgd every time.
20059 */
20060
20061-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20062+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20063 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20064
20065 int force_personality32;
20066@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20067
20068 for (address = start; address <= end; address += PGDIR_SIZE) {
20069 const pgd_t *pgd_ref = pgd_offset_k(address);
20070+
20071+#ifdef CONFIG_PAX_PER_CPU_PGD
20072+ unsigned long cpu;
20073+#else
20074 struct page *page;
20075+#endif
20076
20077 if (pgd_none(*pgd_ref))
20078 continue;
20079
20080 spin_lock(&pgd_lock);
20081+
20082+#ifdef CONFIG_PAX_PER_CPU_PGD
20083+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20084+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20085+#else
20086 list_for_each_entry(page, &pgd_list, lru) {
20087 pgd_t *pgd;
20088 spinlock_t *pgt_lock;
20089@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20090 /* the pgt_lock only for Xen */
20091 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20092 spin_lock(pgt_lock);
20093+#endif
20094
20095 if (pgd_none(*pgd))
20096 set_pgd(pgd, *pgd_ref);
20097@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20098 BUG_ON(pgd_page_vaddr(*pgd)
20099 != pgd_page_vaddr(*pgd_ref));
20100
20101+#ifndef CONFIG_PAX_PER_CPU_PGD
20102 spin_unlock(pgt_lock);
20103+#endif
20104+
20105 }
20106 spin_unlock(&pgd_lock);
20107 }
20108@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20109 pmd = fill_pmd(pud, vaddr);
20110 pte = fill_pte(pmd, vaddr);
20111
20112+ pax_open_kernel();
20113 set_pte(pte, new_pte);
20114+ pax_close_kernel();
20115
20116 /*
20117 * It's enough to flush this one mapping.
20118@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20119 pgd = pgd_offset_k((unsigned long)__va(phys));
20120 if (pgd_none(*pgd)) {
20121 pud = (pud_t *) spp_getpage();
20122- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20123- _PAGE_USER));
20124+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20125 }
20126 pud = pud_offset(pgd, (unsigned long)__va(phys));
20127 if (pud_none(*pud)) {
20128 pmd = (pmd_t *) spp_getpage();
20129- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20130- _PAGE_USER));
20131+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20132 }
20133 pmd = pmd_offset(pud, phys);
20134 BUG_ON(!pmd_none(*pmd));
20135@@ -693,6 +707,12 @@ void __init mem_init(void)
20136
20137 pci_iommu_alloc();
20138
20139+#ifdef CONFIG_PAX_PER_CPU_PGD
20140+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20141+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20142+ KERNEL_PGD_PTRS);
20143+#endif
20144+
20145 /* clear_bss() already clear the empty_zero_page */
20146
20147 reservedpages = 0;
20148@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20149 static struct vm_area_struct gate_vma = {
20150 .vm_start = VSYSCALL_START,
20151 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20152- .vm_page_prot = PAGE_READONLY_EXEC,
20153- .vm_flags = VM_READ | VM_EXEC
20154+ .vm_page_prot = PAGE_READONLY,
20155+ .vm_flags = VM_READ
20156 };
20157
20158 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20159@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20160
20161 const char *arch_vma_name(struct vm_area_struct *vma)
20162 {
20163- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20164+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20165 return "[vdso]";
20166 if (vma == &gate_vma)
20167 return "[vsyscall]";
20168diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
20169--- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20170+++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20171@@ -31,7 +31,7 @@ int direct_gbpages
20172 static void __init find_early_table_space(unsigned long end, int use_pse,
20173 int use_gbpages)
20174 {
20175- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20176+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20177 phys_addr_t base;
20178
20179 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20180@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20181 */
20182 int devmem_is_allowed(unsigned long pagenr)
20183 {
20184- if (pagenr <= 256)
20185+#ifdef CONFIG_GRKERNSEC_KMEM
20186+ /* allow BDA */
20187+ if (!pagenr)
20188+ return 1;
20189+ /* allow EBDA */
20190+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20191+ return 1;
20192+#else
20193+ if (!pagenr)
20194+ return 1;
20195+#ifdef CONFIG_VM86
20196+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20197+ return 1;
20198+#endif
20199+#endif
20200+
20201+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20202 return 1;
20203+#ifdef CONFIG_GRKERNSEC_KMEM
20204+ /* throw out everything else below 1MB */
20205+ if (pagenr <= 256)
20206+ return 0;
20207+#endif
20208 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20209 return 0;
20210 if (!page_is_ram(pagenr))
20211 return 1;
20212+
20213 return 0;
20214 }
20215
20216@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20217
20218 void free_initmem(void)
20219 {
20220+
20221+#ifdef CONFIG_PAX_KERNEXEC
20222+#ifdef CONFIG_X86_32
20223+ /* PaX: limit KERNEL_CS to actual size */
20224+ unsigned long addr, limit;
20225+ struct desc_struct d;
20226+ int cpu;
20227+
20228+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20229+ limit = (limit - 1UL) >> PAGE_SHIFT;
20230+
20231+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20232+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20233+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20234+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20235+ }
20236+
20237+ /* PaX: make KERNEL_CS read-only */
20238+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20239+ if (!paravirt_enabled())
20240+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20241+/*
20242+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20243+ pgd = pgd_offset_k(addr);
20244+ pud = pud_offset(pgd, addr);
20245+ pmd = pmd_offset(pud, addr);
20246+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20247+ }
20248+*/
20249+#ifdef CONFIG_X86_PAE
20250+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20251+/*
20252+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20253+ pgd = pgd_offset_k(addr);
20254+ pud = pud_offset(pgd, addr);
20255+ pmd = pmd_offset(pud, addr);
20256+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20257+ }
20258+*/
20259+#endif
20260+
20261+#ifdef CONFIG_MODULES
20262+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20263+#endif
20264+
20265+#else
20266+ pgd_t *pgd;
20267+ pud_t *pud;
20268+ pmd_t *pmd;
20269+ unsigned long addr, end;
20270+
20271+ /* PaX: make kernel code/rodata read-only, rest non-executable */
20272+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20273+ pgd = pgd_offset_k(addr);
20274+ pud = pud_offset(pgd, addr);
20275+ pmd = pmd_offset(pud, addr);
20276+ if (!pmd_present(*pmd))
20277+ continue;
20278+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20279+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20280+ else
20281+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20282+ }
20283+
20284+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20285+ end = addr + KERNEL_IMAGE_SIZE;
20286+ for (; addr < end; addr += PMD_SIZE) {
20287+ pgd = pgd_offset_k(addr);
20288+ pud = pud_offset(pgd, addr);
20289+ pmd = pmd_offset(pud, addr);
20290+ if (!pmd_present(*pmd))
20291+ continue;
20292+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20293+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20294+ }
20295+#endif
20296+
20297+ flush_tlb_all();
20298+#endif
20299+
20300 free_init_pages("unused kernel memory",
20301 (unsigned long)(&__init_begin),
20302 (unsigned long)(&__init_end));
20303diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
20304--- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20305+++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20306@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20307 type = kmap_atomic_idx_push();
20308 idx = type + KM_TYPE_NR * smp_processor_id();
20309 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20310+
20311+ pax_open_kernel();
20312 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20313+ pax_close_kernel();
20314+
20315 arch_flush_lazy_mmu_mode();
20316
20317 return (void *)vaddr;
20318diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
20319--- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20320+++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20321@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20322 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20323 int is_ram = page_is_ram(pfn);
20324
20325- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20326+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20327 return NULL;
20328 WARN_ON_ONCE(is_ram);
20329 }
20330@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20331 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20332
20333 static __initdata int after_paging_init;
20334-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20335+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20336
20337 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20338 {
20339@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20340 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20341
20342 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20343- memset(bm_pte, 0, sizeof(bm_pte));
20344- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20345+ pmd_populate_user(&init_mm, pmd, bm_pte);
20346
20347 /*
20348 * The boot-ioremap range spans multiple pmds, for which
20349diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
20350--- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20351+++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20352@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20353 * memory (e.g. tracked pages)? For now, we need this to avoid
20354 * invoking kmemcheck for PnP BIOS calls.
20355 */
20356- if (regs->flags & X86_VM_MASK)
20357+ if (v8086_mode(regs))
20358 return false;
20359- if (regs->cs != __KERNEL_CS)
20360+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20361 return false;
20362
20363 pte = kmemcheck_pte_lookup(address);
20364diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
20365--- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20366+++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20367@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20368 * Leave an at least ~128 MB hole with possible stack randomization.
20369 */
20370 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20371-#define MAX_GAP (TASK_SIZE/6*5)
20372+#define MAX_GAP (pax_task_size/6*5)
20373
20374 /*
20375 * True on X86_32 or when emulating IA32 on X86_64
20376@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20377 return rnd << PAGE_SHIFT;
20378 }
20379
20380-static unsigned long mmap_base(void)
20381+static unsigned long mmap_base(struct mm_struct *mm)
20382 {
20383 unsigned long gap = rlimit(RLIMIT_STACK);
20384+ unsigned long pax_task_size = TASK_SIZE;
20385+
20386+#ifdef CONFIG_PAX_SEGMEXEC
20387+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20388+ pax_task_size = SEGMEXEC_TASK_SIZE;
20389+#endif
20390
20391 if (gap < MIN_GAP)
20392 gap = MIN_GAP;
20393 else if (gap > MAX_GAP)
20394 gap = MAX_GAP;
20395
20396- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20397+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20398 }
20399
20400 /*
20401 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20402 * does, but not when emulating X86_32
20403 */
20404-static unsigned long mmap_legacy_base(void)
20405+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20406 {
20407- if (mmap_is_ia32())
20408+ if (mmap_is_ia32()) {
20409+
20410+#ifdef CONFIG_PAX_SEGMEXEC
20411+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20412+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20413+ else
20414+#endif
20415+
20416 return TASK_UNMAPPED_BASE;
20417- else
20418+ } else
20419 return TASK_UNMAPPED_BASE + mmap_rnd();
20420 }
20421
20422@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20423 void arch_pick_mmap_layout(struct mm_struct *mm)
20424 {
20425 if (mmap_is_legacy()) {
20426- mm->mmap_base = mmap_legacy_base();
20427+ mm->mmap_base = mmap_legacy_base(mm);
20428+
20429+#ifdef CONFIG_PAX_RANDMMAP
20430+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20431+ mm->mmap_base += mm->delta_mmap;
20432+#endif
20433+
20434 mm->get_unmapped_area = arch_get_unmapped_area;
20435 mm->unmap_area = arch_unmap_area;
20436 } else {
20437- mm->mmap_base = mmap_base();
20438+ mm->mmap_base = mmap_base(mm);
20439+
20440+#ifdef CONFIG_PAX_RANDMMAP
20441+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20442+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20443+#endif
20444+
20445 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20446 mm->unmap_area = arch_unmap_area_topdown;
20447 }
20448diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
20449--- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20450+++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20451@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20452 break;
20453 default:
20454 {
20455- unsigned char *ip = (unsigned char *)instptr;
20456+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20457 my_trace->opcode = MMIO_UNKNOWN_OP;
20458 my_trace->width = 0;
20459 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20460@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20461 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20462 void __iomem *addr)
20463 {
20464- static atomic_t next_id;
20465+ static atomic_unchecked_t next_id;
20466 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20467 /* These are page-unaligned. */
20468 struct mmiotrace_map map = {
20469@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20470 .private = trace
20471 },
20472 .phys = offset,
20473- .id = atomic_inc_return(&next_id)
20474+ .id = atomic_inc_return_unchecked(&next_id)
20475 };
20476 map.map_id = trace->id;
20477
20478diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20479--- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20480+++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20481@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20482 */
20483 #ifdef CONFIG_PCI_BIOS
20484 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20485- pgprot_val(forbidden) |= _PAGE_NX;
20486+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20487 #endif
20488
20489 /*
20490@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20491 * Does not cover __inittext since that is gone later on. On
20492 * 64bit we do not enforce !NX on the low mapping
20493 */
20494- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20495- pgprot_val(forbidden) |= _PAGE_NX;
20496+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20497+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20498
20499+#ifdef CONFIG_DEBUG_RODATA
20500 /*
20501 * The .rodata section needs to be read-only. Using the pfn
20502 * catches all aliases.
20503@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20504 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20505 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20506 pgprot_val(forbidden) |= _PAGE_RW;
20507+#endif
20508
20509 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20510 /*
20511@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20512 }
20513 #endif
20514
20515+#ifdef CONFIG_PAX_KERNEXEC
20516+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20517+ pgprot_val(forbidden) |= _PAGE_RW;
20518+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20519+ }
20520+#endif
20521+
20522 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20523
20524 return prot;
20525@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20526 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20527 {
20528 /* change init_mm */
20529+ pax_open_kernel();
20530 set_pte_atomic(kpte, pte);
20531+
20532 #ifdef CONFIG_X86_32
20533 if (!SHARED_KERNEL_PMD) {
20534+
20535+#ifdef CONFIG_PAX_PER_CPU_PGD
20536+ unsigned long cpu;
20537+#else
20538 struct page *page;
20539+#endif
20540
20541+#ifdef CONFIG_PAX_PER_CPU_PGD
20542+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20543+ pgd_t *pgd = get_cpu_pgd(cpu);
20544+#else
20545 list_for_each_entry(page, &pgd_list, lru) {
20546- pgd_t *pgd;
20547+ pgd_t *pgd = (pgd_t *)page_address(page);
20548+#endif
20549+
20550 pud_t *pud;
20551 pmd_t *pmd;
20552
20553- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20554+ pgd += pgd_index(address);
20555 pud = pud_offset(pgd, address);
20556 pmd = pmd_offset(pud, address);
20557 set_pte_atomic((pte_t *)pmd, pte);
20558 }
20559 }
20560 #endif
20561+ pax_close_kernel();
20562 }
20563
20564 static int
20565diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20566--- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20567+++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20568@@ -36,7 +36,7 @@ enum {
20569
20570 static int pte_testbit(pte_t pte)
20571 {
20572- return pte_flags(pte) & _PAGE_UNUSED1;
20573+ return pte_flags(pte) & _PAGE_CPA_TEST;
20574 }
20575
20576 struct split_state {
20577diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20578--- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20579+++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20580@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20581
20582 if (!entry) {
20583 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20584- current->comm, current->pid, start, end);
20585+ current->comm, task_pid_nr(current), start, end);
20586 return -EINVAL;
20587 }
20588
20589@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20590 while (cursor < to) {
20591 if (!devmem_is_allowed(pfn)) {
20592 printk(KERN_INFO
20593- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20594- current->comm, from, to);
20595+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20596+ current->comm, from, to, cursor);
20597 return 0;
20598 }
20599 cursor += PAGE_SIZE;
20600@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20601 printk(KERN_INFO
20602 "%s:%d ioremap_change_attr failed %s "
20603 "for %Lx-%Lx\n",
20604- current->comm, current->pid,
20605+ current->comm, task_pid_nr(current),
20606 cattr_name(flags),
20607 base, (unsigned long long)(base + size));
20608 return -EINVAL;
20609@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20610 if (want_flags != flags) {
20611 printk(KERN_WARNING
20612 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20613- current->comm, current->pid,
20614+ current->comm, task_pid_nr(current),
20615 cattr_name(want_flags),
20616 (unsigned long long)paddr,
20617 (unsigned long long)(paddr + size),
20618@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20619 free_memtype(paddr, paddr + size);
20620 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20621 " for %Lx-%Lx, got %s\n",
20622- current->comm, current->pid,
20623+ current->comm, task_pid_nr(current),
20624 cattr_name(want_flags),
20625 (unsigned long long)paddr,
20626 (unsigned long long)(paddr + size),
20627diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20628--- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20629+++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20630@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20631 int i;
20632 enum reason_type rv = OTHERS;
20633
20634- p = (unsigned char *)ins_addr;
20635+ p = (unsigned char *)ktla_ktva(ins_addr);
20636 p += skip_prefix(p, &prf);
20637 p += get_opcode(p, &opcode);
20638
20639@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20640 struct prefix_bits prf;
20641 int i;
20642
20643- p = (unsigned char *)ins_addr;
20644+ p = (unsigned char *)ktla_ktva(ins_addr);
20645 p += skip_prefix(p, &prf);
20646 p += get_opcode(p, &opcode);
20647
20648@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20649 struct prefix_bits prf;
20650 int i;
20651
20652- p = (unsigned char *)ins_addr;
20653+ p = (unsigned char *)ktla_ktva(ins_addr);
20654 p += skip_prefix(p, &prf);
20655 p += get_opcode(p, &opcode);
20656
20657@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20658 struct prefix_bits prf;
20659 int i;
20660
20661- p = (unsigned char *)ins_addr;
20662+ p = (unsigned char *)ktla_ktva(ins_addr);
20663 p += skip_prefix(p, &prf);
20664 p += get_opcode(p, &opcode);
20665 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20666@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20667 struct prefix_bits prf;
20668 int i;
20669
20670- p = (unsigned char *)ins_addr;
20671+ p = (unsigned char *)ktla_ktva(ins_addr);
20672 p += skip_prefix(p, &prf);
20673 p += get_opcode(p, &opcode);
20674 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20675diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20676--- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20677+++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20678@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20679 return;
20680 }
20681 pte = pte_offset_kernel(pmd, vaddr);
20682+
20683+ pax_open_kernel();
20684 if (pte_val(pteval))
20685 set_pte_at(&init_mm, vaddr, pte, pteval);
20686 else
20687 pte_clear(&init_mm, vaddr, pte);
20688+ pax_close_kernel();
20689
20690 /*
20691 * It's enough to flush this one mapping.
20692diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20693--- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20694+++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20695@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20696 list_del(&page->lru);
20697 }
20698
20699-#define UNSHARED_PTRS_PER_PGD \
20700- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20701+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20702+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20703
20704+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20705+{
20706+ while (count--)
20707+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20708+}
20709+#endif
20710+
20711+#ifdef CONFIG_PAX_PER_CPU_PGD
20712+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20713+{
20714+ while (count--)
20715+
20716+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20717+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20718+#else
20719+ *dst++ = *src++;
20720+#endif
20721
20722+}
20723+#endif
20724+
20725+#ifdef CONFIG_X86_64
20726+#define pxd_t pud_t
20727+#define pyd_t pgd_t
20728+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20729+#define pxd_free(mm, pud) pud_free((mm), (pud))
20730+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20731+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20732+#define PYD_SIZE PGDIR_SIZE
20733+#else
20734+#define pxd_t pmd_t
20735+#define pyd_t pud_t
20736+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20737+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20738+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20739+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20740+#define PYD_SIZE PUD_SIZE
20741+#endif
20742+
20743+#ifdef CONFIG_PAX_PER_CPU_PGD
20744+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20745+static inline void pgd_dtor(pgd_t *pgd) {}
20746+#else
20747 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20748 {
20749 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20750@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20751 pgd_list_del(pgd);
20752 spin_unlock(&pgd_lock);
20753 }
20754+#endif
20755
20756 /*
20757 * List of all pgd's needed for non-PAE so it can invalidate entries
20758@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20759 * -- wli
20760 */
20761
20762-#ifdef CONFIG_X86_PAE
20763+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20764 /*
20765 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20766 * updating the top-level pagetable entries to guarantee the
20767@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20768 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20769 * and initialize the kernel pmds here.
20770 */
20771-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20772+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20773
20774 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20775 {
20776@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20777 */
20778 flush_tlb_mm(mm);
20779 }
20780+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20781+#define PREALLOCATED_PXDS USER_PGD_PTRS
20782 #else /* !CONFIG_X86_PAE */
20783
20784 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20785-#define PREALLOCATED_PMDS 0
20786+#define PREALLOCATED_PXDS 0
20787
20788 #endif /* CONFIG_X86_PAE */
20789
20790-static void free_pmds(pmd_t *pmds[])
20791+static void free_pxds(pxd_t *pxds[])
20792 {
20793 int i;
20794
20795- for(i = 0; i < PREALLOCATED_PMDS; i++)
20796- if (pmds[i])
20797- free_page((unsigned long)pmds[i]);
20798+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20799+ if (pxds[i])
20800+ free_page((unsigned long)pxds[i]);
20801 }
20802
20803-static int preallocate_pmds(pmd_t *pmds[])
20804+static int preallocate_pxds(pxd_t *pxds[])
20805 {
20806 int i;
20807 bool failed = false;
20808
20809- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20810- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20811- if (pmd == NULL)
20812+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20813+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20814+ if (pxd == NULL)
20815 failed = true;
20816- pmds[i] = pmd;
20817+ pxds[i] = pxd;
20818 }
20819
20820 if (failed) {
20821- free_pmds(pmds);
20822+ free_pxds(pxds);
20823 return -ENOMEM;
20824 }
20825
20826@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20827 * preallocate which never got a corresponding vma will need to be
20828 * freed manually.
20829 */
20830-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20831+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20832 {
20833 int i;
20834
20835- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20836+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20837 pgd_t pgd = pgdp[i];
20838
20839 if (pgd_val(pgd) != 0) {
20840- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20841+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20842
20843- pgdp[i] = native_make_pgd(0);
20844+ set_pgd(pgdp + i, native_make_pgd(0));
20845
20846- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20847- pmd_free(mm, pmd);
20848+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20849+ pxd_free(mm, pxd);
20850 }
20851 }
20852 }
20853
20854-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20855+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20856 {
20857- pud_t *pud;
20858+ pyd_t *pyd;
20859 unsigned long addr;
20860 int i;
20861
20862- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20863+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20864 return;
20865
20866- pud = pud_offset(pgd, 0);
20867+#ifdef CONFIG_X86_64
20868+ pyd = pyd_offset(mm, 0L);
20869+#else
20870+ pyd = pyd_offset(pgd, 0L);
20871+#endif
20872
20873- for (addr = i = 0; i < PREALLOCATED_PMDS;
20874- i++, pud++, addr += PUD_SIZE) {
20875- pmd_t *pmd = pmds[i];
20876+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20877+ i++, pyd++, addr += PYD_SIZE) {
20878+ pxd_t *pxd = pxds[i];
20879
20880 if (i >= KERNEL_PGD_BOUNDARY)
20881- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20882- sizeof(pmd_t) * PTRS_PER_PMD);
20883+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20884+ sizeof(pxd_t) * PTRS_PER_PMD);
20885
20886- pud_populate(mm, pud, pmd);
20887+ pyd_populate(mm, pyd, pxd);
20888 }
20889 }
20890
20891 pgd_t *pgd_alloc(struct mm_struct *mm)
20892 {
20893 pgd_t *pgd;
20894- pmd_t *pmds[PREALLOCATED_PMDS];
20895+ pxd_t *pxds[PREALLOCATED_PXDS];
20896
20897 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20898
20899@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20900
20901 mm->pgd = pgd;
20902
20903- if (preallocate_pmds(pmds) != 0)
20904+ if (preallocate_pxds(pxds) != 0)
20905 goto out_free_pgd;
20906
20907 if (paravirt_pgd_alloc(mm) != 0)
20908- goto out_free_pmds;
20909+ goto out_free_pxds;
20910
20911 /*
20912 * Make sure that pre-populating the pmds is atomic with
20913@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20914 spin_lock(&pgd_lock);
20915
20916 pgd_ctor(mm, pgd);
20917- pgd_prepopulate_pmd(mm, pgd, pmds);
20918+ pgd_prepopulate_pxd(mm, pgd, pxds);
20919
20920 spin_unlock(&pgd_lock);
20921
20922 return pgd;
20923
20924-out_free_pmds:
20925- free_pmds(pmds);
20926+out_free_pxds:
20927+ free_pxds(pxds);
20928 out_free_pgd:
20929 free_page((unsigned long)pgd);
20930 out:
20931@@ -295,7 +344,7 @@ out:
20932
20933 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20934 {
20935- pgd_mop_up_pmds(mm, pgd);
20936+ pgd_mop_up_pxds(mm, pgd);
20937 pgd_dtor(pgd);
20938 paravirt_pgd_free(mm, pgd);
20939 free_page((unsigned long)pgd);
20940diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20941--- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20942+++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20943@@ -5,8 +5,10 @@
20944 #include <asm/pgtable.h>
20945 #include <asm/proto.h>
20946
20947+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20948 static int disable_nx __cpuinitdata;
20949
20950+#ifndef CONFIG_PAX_PAGEEXEC
20951 /*
20952 * noexec = on|off
20953 *
20954@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20955 return 0;
20956 }
20957 early_param("noexec", noexec_setup);
20958+#endif
20959+
20960+#endif
20961
20962 void __cpuinit x86_configure_nx(void)
20963 {
20964+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20965 if (cpu_has_nx && !disable_nx)
20966 __supported_pte_mask |= _PAGE_NX;
20967 else
20968+#endif
20969 __supported_pte_mask &= ~_PAGE_NX;
20970 }
20971
20972diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20973--- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20974+++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20975@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20976 BUG();
20977 cpumask_clear_cpu(cpu,
20978 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20979+
20980+#ifndef CONFIG_PAX_PER_CPU_PGD
20981 load_cr3(swapper_pg_dir);
20982+#endif
20983+
20984 }
20985 EXPORT_SYMBOL_GPL(leave_mm);
20986
20987diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20988--- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20989+++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20990@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20991 module_free(NULL, image);
20992 return;
20993 }
20994+ pax_open_kernel();
20995 memcpy(image + proglen, temp, ilen);
20996+ pax_close_kernel();
20997 }
20998 proglen += ilen;
20999 addrs[i] = proglen;
21000@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21001 break;
21002 }
21003 if (proglen == oldproglen) {
21004- image = module_alloc(max_t(unsigned int,
21005+ image = module_alloc_exec(max_t(unsigned int,
21006 proglen,
21007 sizeof(struct work_struct)));
21008 if (!image)
21009diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
21010--- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21011+++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
21012@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21013 {
21014 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21015
21016- if (!user_mode_vm(regs)) {
21017+ if (!user_mode(regs)) {
21018 unsigned long stack = kernel_stack_pointer(regs);
21019 if (depth)
21020 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21021diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
21022--- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21023+++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21024@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21025 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21026 pci_mmcfg_late_init();
21027 pcibios_enable_irq = mrst_pci_irq_enable;
21028- pci_root_ops = pci_mrst_ops;
21029+ pax_open_kernel();
21030+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21031+ pax_close_kernel();
21032 /* Continue with standard init */
21033 return 1;
21034 }
21035diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
21036--- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21037+++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21038@@ -79,50 +79,93 @@ union bios32 {
21039 static struct {
21040 unsigned long address;
21041 unsigned short segment;
21042-} bios32_indirect = { 0, __KERNEL_CS };
21043+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21044
21045 /*
21046 * Returns the entry point for the given service, NULL on error
21047 */
21048
21049-static unsigned long bios32_service(unsigned long service)
21050+static unsigned long __devinit bios32_service(unsigned long service)
21051 {
21052 unsigned char return_code; /* %al */
21053 unsigned long address; /* %ebx */
21054 unsigned long length; /* %ecx */
21055 unsigned long entry; /* %edx */
21056 unsigned long flags;
21057+ struct desc_struct d, *gdt;
21058
21059 local_irq_save(flags);
21060- __asm__("lcall *(%%edi); cld"
21061+
21062+ gdt = get_cpu_gdt_table(smp_processor_id());
21063+
21064+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21065+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21066+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21067+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21068+
21069+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21070 : "=a" (return_code),
21071 "=b" (address),
21072 "=c" (length),
21073 "=d" (entry)
21074 : "0" (service),
21075 "1" (0),
21076- "D" (&bios32_indirect));
21077+ "D" (&bios32_indirect),
21078+ "r"(__PCIBIOS_DS)
21079+ : "memory");
21080+
21081+ pax_open_kernel();
21082+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21083+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21084+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21085+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21086+ pax_close_kernel();
21087+
21088 local_irq_restore(flags);
21089
21090 switch (return_code) {
21091- case 0:
21092- return address + entry;
21093- case 0x80: /* Not present */
21094- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21095- return 0;
21096- default: /* Shouldn't happen */
21097- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21098- service, return_code);
21099+ case 0: {
21100+ int cpu;
21101+ unsigned char flags;
21102+
21103+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21104+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21105+ printk(KERN_WARNING "bios32_service: not valid\n");
21106 return 0;
21107+ }
21108+ address = address + PAGE_OFFSET;
21109+ length += 16UL; /* some BIOSs underreport this... */
21110+ flags = 4;
21111+ if (length >= 64*1024*1024) {
21112+ length >>= PAGE_SHIFT;
21113+ flags |= 8;
21114+ }
21115+
21116+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21117+ gdt = get_cpu_gdt_table(cpu);
21118+ pack_descriptor(&d, address, length, 0x9b, flags);
21119+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21120+ pack_descriptor(&d, address, length, 0x93, flags);
21121+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21122+ }
21123+ return entry;
21124+ }
21125+ case 0x80: /* Not present */
21126+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21127+ return 0;
21128+ default: /* Shouldn't happen */
21129+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21130+ service, return_code);
21131+ return 0;
21132 }
21133 }
21134
21135 static struct {
21136 unsigned long address;
21137 unsigned short segment;
21138-} pci_indirect = { 0, __KERNEL_CS };
21139+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21140
21141-static int pci_bios_present;
21142+static int pci_bios_present __read_only;
21143
21144 static int __devinit check_pcibios(void)
21145 {
21146@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21147 unsigned long flags, pcibios_entry;
21148
21149 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21150- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21151+ pci_indirect.address = pcibios_entry;
21152
21153 local_irq_save(flags);
21154- __asm__(
21155- "lcall *(%%edi); cld\n\t"
21156+ __asm__("movw %w6, %%ds\n\t"
21157+ "lcall *%%ss:(%%edi); cld\n\t"
21158+ "push %%ss\n\t"
21159+ "pop %%ds\n\t"
21160 "jc 1f\n\t"
21161 "xor %%ah, %%ah\n"
21162 "1:"
21163@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21164 "=b" (ebx),
21165 "=c" (ecx)
21166 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21167- "D" (&pci_indirect)
21168+ "D" (&pci_indirect),
21169+ "r" (__PCIBIOS_DS)
21170 : "memory");
21171 local_irq_restore(flags);
21172
21173@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21174
21175 switch (len) {
21176 case 1:
21177- __asm__("lcall *(%%esi); cld\n\t"
21178+ __asm__("movw %w6, %%ds\n\t"
21179+ "lcall *%%ss:(%%esi); cld\n\t"
21180+ "push %%ss\n\t"
21181+ "pop %%ds\n\t"
21182 "jc 1f\n\t"
21183 "xor %%ah, %%ah\n"
21184 "1:"
21185@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21186 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21187 "b" (bx),
21188 "D" ((long)reg),
21189- "S" (&pci_indirect));
21190+ "S" (&pci_indirect),
21191+ "r" (__PCIBIOS_DS));
21192 /*
21193 * Zero-extend the result beyond 8 bits, do not trust the
21194 * BIOS having done it:
21195@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21196 *value &= 0xff;
21197 break;
21198 case 2:
21199- __asm__("lcall *(%%esi); cld\n\t"
21200+ __asm__("movw %w6, %%ds\n\t"
21201+ "lcall *%%ss:(%%esi); cld\n\t"
21202+ "push %%ss\n\t"
21203+ "pop %%ds\n\t"
21204 "jc 1f\n\t"
21205 "xor %%ah, %%ah\n"
21206 "1:"
21207@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21208 : "1" (PCIBIOS_READ_CONFIG_WORD),
21209 "b" (bx),
21210 "D" ((long)reg),
21211- "S" (&pci_indirect));
21212+ "S" (&pci_indirect),
21213+ "r" (__PCIBIOS_DS));
21214 /*
21215 * Zero-extend the result beyond 16 bits, do not trust the
21216 * BIOS having done it:
21217@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21218 *value &= 0xffff;
21219 break;
21220 case 4:
21221- __asm__("lcall *(%%esi); cld\n\t"
21222+ __asm__("movw %w6, %%ds\n\t"
21223+ "lcall *%%ss:(%%esi); cld\n\t"
21224+ "push %%ss\n\t"
21225+ "pop %%ds\n\t"
21226 "jc 1f\n\t"
21227 "xor %%ah, %%ah\n"
21228 "1:"
21229@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21230 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21231 "b" (bx),
21232 "D" ((long)reg),
21233- "S" (&pci_indirect));
21234+ "S" (&pci_indirect),
21235+ "r" (__PCIBIOS_DS));
21236 break;
21237 }
21238
21239@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21240
21241 switch (len) {
21242 case 1:
21243- __asm__("lcall *(%%esi); cld\n\t"
21244+ __asm__("movw %w6, %%ds\n\t"
21245+ "lcall *%%ss:(%%esi); cld\n\t"
21246+ "push %%ss\n\t"
21247+ "pop %%ds\n\t"
21248 "jc 1f\n\t"
21249 "xor %%ah, %%ah\n"
21250 "1:"
21251@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21252 "c" (value),
21253 "b" (bx),
21254 "D" ((long)reg),
21255- "S" (&pci_indirect));
21256+ "S" (&pci_indirect),
21257+ "r" (__PCIBIOS_DS));
21258 break;
21259 case 2:
21260- __asm__("lcall *(%%esi); cld\n\t"
21261+ __asm__("movw %w6, %%ds\n\t"
21262+ "lcall *%%ss:(%%esi); cld\n\t"
21263+ "push %%ss\n\t"
21264+ "pop %%ds\n\t"
21265 "jc 1f\n\t"
21266 "xor %%ah, %%ah\n"
21267 "1:"
21268@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21269 "c" (value),
21270 "b" (bx),
21271 "D" ((long)reg),
21272- "S" (&pci_indirect));
21273+ "S" (&pci_indirect),
21274+ "r" (__PCIBIOS_DS));
21275 break;
21276 case 4:
21277- __asm__("lcall *(%%esi); cld\n\t"
21278+ __asm__("movw %w6, %%ds\n\t"
21279+ "lcall *%%ss:(%%esi); cld\n\t"
21280+ "push %%ss\n\t"
21281+ "pop %%ds\n\t"
21282 "jc 1f\n\t"
21283 "xor %%ah, %%ah\n"
21284 "1:"
21285@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21286 "c" (value),
21287 "b" (bx),
21288 "D" ((long)reg),
21289- "S" (&pci_indirect));
21290+ "S" (&pci_indirect),
21291+ "r" (__PCIBIOS_DS));
21292 break;
21293 }
21294
21295@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21296
21297 DBG("PCI: Fetching IRQ routing table... ");
21298 __asm__("push %%es\n\t"
21299+ "movw %w8, %%ds\n\t"
21300 "push %%ds\n\t"
21301 "pop %%es\n\t"
21302- "lcall *(%%esi); cld\n\t"
21303+ "lcall *%%ss:(%%esi); cld\n\t"
21304 "pop %%es\n\t"
21305+ "push %%ss\n\t"
21306+ "pop %%ds\n"
21307 "jc 1f\n\t"
21308 "xor %%ah, %%ah\n"
21309 "1:"
21310@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21311 "1" (0),
21312 "D" ((long) &opt),
21313 "S" (&pci_indirect),
21314- "m" (opt)
21315+ "m" (opt),
21316+ "r" (__PCIBIOS_DS)
21317 : "memory");
21318 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21319 if (ret & 0xff00)
21320@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21321 {
21322 int ret;
21323
21324- __asm__("lcall *(%%esi); cld\n\t"
21325+ __asm__("movw %w5, %%ds\n\t"
21326+ "lcall *%%ss:(%%esi); cld\n\t"
21327+ "push %%ss\n\t"
21328+ "pop %%ds\n"
21329 "jc 1f\n\t"
21330 "xor %%ah, %%ah\n"
21331 "1:"
21332@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21333 : "0" (PCIBIOS_SET_PCI_HW_INT),
21334 "b" ((dev->bus->number << 8) | dev->devfn),
21335 "c" ((irq << 8) | (pin + 10)),
21336- "S" (&pci_indirect));
21337+ "S" (&pci_indirect),
21338+ "r" (__PCIBIOS_DS));
21339 return !(ret & 0xff00);
21340 }
21341 EXPORT_SYMBOL(pcibios_set_irq_routing);
21342diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
21343--- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21344+++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-09-17 00:53:42.000000000 -0400
21345@@ -38,70 +38,56 @@
21346 */
21347
21348 static unsigned long efi_rt_eflags;
21349-static pgd_t efi_bak_pg_dir_pointer[2];
21350+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21351
21352-void efi_call_phys_prelog(void)
21353+void __init efi_call_phys_prelog(void)
21354 {
21355- unsigned long cr4;
21356- unsigned long temp;
21357 struct desc_ptr gdt_descr;
21358
21359- local_irq_save(efi_rt_eflags);
21360+#ifdef CONFIG_PAX_KERNEXEC
21361+ struct desc_struct d;
21362+#endif
21363
21364- /*
21365- * If I don't have PAE, I should just duplicate two entries in page
21366- * directory. If I have PAE, I just need to duplicate one entry in
21367- * page directory.
21368- */
21369- cr4 = read_cr4_safe();
21370+ local_irq_save(efi_rt_eflags);
21371
21372- if (cr4 & X86_CR4_PAE) {
21373- efi_bak_pg_dir_pointer[0].pgd =
21374- swapper_pg_dir[pgd_index(0)].pgd;
21375- swapper_pg_dir[0].pgd =
21376- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21377- } else {
21378- efi_bak_pg_dir_pointer[0].pgd =
21379- swapper_pg_dir[pgd_index(0)].pgd;
21380- efi_bak_pg_dir_pointer[1].pgd =
21381- swapper_pg_dir[pgd_index(0x400000)].pgd;
21382- swapper_pg_dir[pgd_index(0)].pgd =
21383- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21384- temp = PAGE_OFFSET + 0x400000;
21385- swapper_pg_dir[pgd_index(0x400000)].pgd =
21386- swapper_pg_dir[pgd_index(temp)].pgd;
21387- }
21388+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21389+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21390+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21391
21392 /*
21393 * After the lock is released, the original page table is restored.
21394 */
21395 __flush_tlb_all();
21396
21397- gdt_descr.address = __pa(get_cpu_gdt_table(0));
21398+#ifdef CONFIG_PAX_KERNEXEC
21399+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21400+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_CS, &d, DESCTYPE_S);
21401+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21402+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_DS, &d, DESCTYPE_S);
21403+#endif
21404+
21405+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21406 gdt_descr.size = GDT_SIZE - 1;
21407 load_gdt(&gdt_descr);
21408 }
21409
21410-void efi_call_phys_epilog(void)
21411+void __init efi_call_phys_epilog(void)
21412 {
21413- unsigned long cr4;
21414 struct desc_ptr gdt_descr;
21415
21416- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21417+#ifdef CONFIG_PAX_KERNEXEC
21418+ struct desc_struct d;
21419+
21420+ memset(&d, 0, sizeof d);
21421+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_CS, &d, DESCTYPE_S);
21422+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_DS, &d, DESCTYPE_S);
21423+#endif
21424+
21425+ gdt_descr.address = get_cpu_gdt_table(0);
21426 gdt_descr.size = GDT_SIZE - 1;
21427 load_gdt(&gdt_descr);
21428
21429- cr4 = read_cr4_safe();
21430-
21431- if (cr4 & X86_CR4_PAE) {
21432- swapper_pg_dir[pgd_index(0)].pgd =
21433- efi_bak_pg_dir_pointer[0].pgd;
21434- } else {
21435- swapper_pg_dir[pgd_index(0)].pgd =
21436- efi_bak_pg_dir_pointer[0].pgd;
21437- swapper_pg_dir[pgd_index(0x400000)].pgd =
21438- efi_bak_pg_dir_pointer[1].pgd;
21439- }
21440+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21441
21442 /*
21443 * After the lock is released, the original page table is restored.
21444diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
21445--- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21446+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-09-17 00:53:42.000000000 -0400
21447@@ -6,6 +6,7 @@
21448 */
21449
21450 #include <linux/linkage.h>
21451+#include <linux/init.h>
21452 #include <asm/page_types.h>
21453
21454 /*
21455@@ -20,7 +21,7 @@
21456 * service functions will comply with gcc calling convention, too.
21457 */
21458
21459-.text
21460+__INIT
21461 ENTRY(efi_call_phys)
21462 /*
21463 * 0. The function can only be called in Linux kernel. So CS has been
21464@@ -36,9 +37,11 @@ ENTRY(efi_call_phys)
21465 * The mapping of lower virtual memory has been created in prelog and
21466 * epilog.
21467 */
21468- movl $1f, %edx
21469- subl $__PAGE_OFFSET, %edx
21470- jmp *%edx
21471+ movl $(__KERNEXEC_EFI_DS), %edx
21472+ mov %edx, %ds
21473+ mov %edx, %es
21474+ mov %edx, %ss
21475+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21476 1:
21477
21478 /*
21479@@ -47,14 +50,8 @@ ENTRY(efi_call_phys)
21480 * parameter 2, ..., param n. To make things easy, we save the return
21481 * address of efi_call_phys in a global variable.
21482 */
21483- popl %edx
21484- movl %edx, saved_return_addr
21485- /* get the function pointer into ECX*/
21486- popl %ecx
21487- movl %ecx, efi_rt_function_ptr
21488- movl $2f, %edx
21489- subl $__PAGE_OFFSET, %edx
21490- pushl %edx
21491+ popl (saved_return_addr)
21492+ popl (efi_rt_function_ptr)
21493
21494 /*
21495 * 3. Clear PG bit in %CR0.
21496@@ -73,9 +70,8 @@ ENTRY(efi_call_phys)
21497 /*
21498 * 5. Call the physical function.
21499 */
21500- jmp *%ecx
21501+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21502
21503-2:
21504 /*
21505 * 6. After EFI runtime service returns, control will return to
21506 * following instruction. We'd better readjust stack pointer first.
21507@@ -88,35 +84,32 @@ ENTRY(efi_call_phys)
21508 movl %cr0, %edx
21509 orl $0x80000000, %edx
21510 movl %edx, %cr0
21511- jmp 1f
21512-1:
21513+
21514 /*
21515 * 8. Now restore the virtual mode from flat mode by
21516 * adding EIP with PAGE_OFFSET.
21517 */
21518- movl $1f, %edx
21519- jmp *%edx
21520+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
21521 1:
21522+ movl $(__KERNEL_DS), %edx
21523+ mov %edx, %ds
21524+ mov %edx, %es
21525+ mov %edx, %ss
21526
21527 /*
21528 * 9. Balance the stack. And because EAX contain the return value,
21529 * we'd better not clobber it.
21530 */
21531- leal efi_rt_function_ptr, %edx
21532- movl (%edx), %ecx
21533- pushl %ecx
21534+ pushl (efi_rt_function_ptr)
21535
21536 /*
21537- * 10. Push the saved return address onto the stack and return.
21538+ * 10. Return to the saved return address.
21539 */
21540- leal saved_return_addr, %edx
21541- movl (%edx), %ecx
21542- pushl %ecx
21543- ret
21544+ jmpl *(saved_return_addr)
21545 ENDPROC(efi_call_phys)
21546 .previous
21547
21548-.data
21549+__INITDATA
21550 saved_return_addr:
21551 .long 0
21552 efi_rt_function_ptr:
21553diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S
21554--- linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
21555+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-09-17 18:31:51.000000000 -0400
21556@@ -40,6 +40,9 @@ ENTRY(efi_call0)
21557 call *%rdi
21558 addq $32, %rsp
21559 RESTORE_XMM
21560+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21561+ orb $0x80, 0x7(%rsp)
21562+#endif
21563 ret
21564 ENDPROC(efi_call0)
21565
21566@@ -50,6 +53,9 @@ ENTRY(efi_call1)
21567 call *%rdi
21568 addq $32, %rsp
21569 RESTORE_XMM
21570+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21571+ orb $0x80, 0x7(%rsp)
21572+#endif
21573 ret
21574 ENDPROC(efi_call1)
21575
21576@@ -60,6 +66,9 @@ ENTRY(efi_call2)
21577 call *%rdi
21578 addq $32, %rsp
21579 RESTORE_XMM
21580+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21581+ orb $0x80, 0x7(%rsp)
21582+#endif
21583 ret
21584 ENDPROC(efi_call2)
21585
21586@@ -71,6 +80,9 @@ ENTRY(efi_call3)
21587 call *%rdi
21588 addq $32, %rsp
21589 RESTORE_XMM
21590+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21591+ orb $0x80, 0x7(%rsp)
21592+#endif
21593 ret
21594 ENDPROC(efi_call3)
21595
21596@@ -83,6 +95,9 @@ ENTRY(efi_call4)
21597 call *%rdi
21598 addq $32, %rsp
21599 RESTORE_XMM
21600+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21601+ orb $0x80, 0x7(%rsp)
21602+#endif
21603 ret
21604 ENDPROC(efi_call4)
21605
21606@@ -96,6 +111,9 @@ ENTRY(efi_call5)
21607 call *%rdi
21608 addq $48, %rsp
21609 RESTORE_XMM
21610+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21611+ orb $0x80, 0x7(%rsp)
21612+#endif
21613 ret
21614 ENDPROC(efi_call5)
21615
21616@@ -112,5 +130,8 @@ ENTRY(efi_call6)
21617 call *%rdi
21618 addq $48, %rsp
21619 RESTORE_XMM
21620+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21621+ orb $0x80, 0x7(%rsp)
21622+#endif
21623 ret
21624 ENDPROC(efi_call6)
21625diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21626--- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21627+++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21628@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21629 }
21630
21631 /* Reboot and power off are handled by the SCU on a MID device */
21632-static void mrst_power_off(void)
21633+static __noreturn void mrst_power_off(void)
21634 {
21635 intel_scu_ipc_simple_command(0xf1, 1);
21636+ BUG();
21637 }
21638
21639-static void mrst_reboot(void)
21640+static __noreturn void mrst_reboot(void)
21641 {
21642 intel_scu_ipc_simple_command(0xf1, 0);
21643+ BUG();
21644 }
21645
21646 /*
21647diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21648--- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21649+++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21650@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21651 cpumask_t mask;
21652 struct reset_args reset_args;
21653
21654+ pax_track_stack();
21655+
21656 reset_args.sender = sender;
21657 cpus_clear(mask);
21658 /* find a single cpu for each uvhub in this distribution mask */
21659diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21660--- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21661+++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21662@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21663 static void fix_processor_context(void)
21664 {
21665 int cpu = smp_processor_id();
21666- struct tss_struct *t = &per_cpu(init_tss, cpu);
21667+ struct tss_struct *t = init_tss + cpu;
21668
21669 set_tss_desc(cpu, t); /*
21670 * This just modifies memory; should not be
21671@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21672 */
21673
21674 #ifdef CONFIG_X86_64
21675+ pax_open_kernel();
21676 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21677+ pax_close_kernel();
21678
21679 syscall_init(); /* This sets MSR_*STAR and related */
21680 #endif
21681diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21682--- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21683+++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21684@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21685 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21686 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21687
21688-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21689+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21690 GCOV_PROFILE := n
21691
21692 #
21693diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21694--- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21695+++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21696@@ -25,6 +25,7 @@
21697 #include <asm/tlbflush.h>
21698 #include <asm/vdso.h>
21699 #include <asm/proto.h>
21700+#include <asm/mman.h>
21701
21702 enum {
21703 VDSO_DISABLED = 0,
21704@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21705 void enable_sep_cpu(void)
21706 {
21707 int cpu = get_cpu();
21708- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21709+ struct tss_struct *tss = init_tss + cpu;
21710
21711 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21712 put_cpu();
21713@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21714 gate_vma.vm_start = FIXADDR_USER_START;
21715 gate_vma.vm_end = FIXADDR_USER_END;
21716 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21717- gate_vma.vm_page_prot = __P101;
21718+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21719 /*
21720 * Make sure the vDSO gets into every core dump.
21721 * Dumping its contents makes post-mortem fully interpretable later
21722@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21723 if (compat)
21724 addr = VDSO_HIGH_BASE;
21725 else {
21726- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21727+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21728 if (IS_ERR_VALUE(addr)) {
21729 ret = addr;
21730 goto up_fail;
21731 }
21732 }
21733
21734- current->mm->context.vdso = (void *)addr;
21735+ current->mm->context.vdso = addr;
21736
21737 if (compat_uses_vma || !compat) {
21738 /*
21739@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21740 }
21741
21742 current_thread_info()->sysenter_return =
21743- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21744+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21745
21746 up_fail:
21747 if (ret)
21748- current->mm->context.vdso = NULL;
21749+ current->mm->context.vdso = 0;
21750
21751 up_write(&mm->mmap_sem);
21752
21753@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21754
21755 const char *arch_vma_name(struct vm_area_struct *vma)
21756 {
21757- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21758+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21759 return "[vdso]";
21760+
21761+#ifdef CONFIG_PAX_SEGMEXEC
21762+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21763+ return "[vdso]";
21764+#endif
21765+
21766 return NULL;
21767 }
21768
21769@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21770 * Check to see if the corresponding task was created in compat vdso
21771 * mode.
21772 */
21773- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21774+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21775 return &gate_vma;
21776 return NULL;
21777 }
21778diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21779--- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21780+++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21781@@ -15,18 +15,19 @@
21782 #include <asm/proto.h>
21783 #include <asm/vdso.h>
21784
21785-unsigned int __read_mostly vdso_enabled = 1;
21786-
21787 extern char vdso_start[], vdso_end[];
21788 extern unsigned short vdso_sync_cpuid;
21789+extern char __vsyscall_0;
21790
21791 static struct page **vdso_pages;
21792+static struct page *vsyscall_page;
21793 static unsigned vdso_size;
21794
21795 static int __init init_vdso_vars(void)
21796 {
21797- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21798- int i;
21799+ size_t nbytes = vdso_end - vdso_start;
21800+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21801+ size_t i;
21802
21803 vdso_size = npages << PAGE_SHIFT;
21804 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21805@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21806 goto oom;
21807 for (i = 0; i < npages; i++) {
21808 struct page *p;
21809- p = alloc_page(GFP_KERNEL);
21810+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21811 if (!p)
21812 goto oom;
21813 vdso_pages[i] = p;
21814- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21815+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21816+ nbytes -= PAGE_SIZE;
21817 }
21818+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21819
21820 return 0;
21821
21822 oom:
21823- printk("Cannot allocate vdso\n");
21824- vdso_enabled = 0;
21825- return -ENOMEM;
21826+ panic("Cannot allocate vdso\n");
21827 }
21828 subsys_initcall(init_vdso_vars);
21829
21830@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21831 unsigned long addr;
21832 int ret;
21833
21834- if (!vdso_enabled)
21835- return 0;
21836-
21837 down_write(&mm->mmap_sem);
21838- addr = vdso_addr(mm->start_stack, vdso_size);
21839- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21840+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21841+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21842 if (IS_ERR_VALUE(addr)) {
21843 ret = addr;
21844 goto up_fail;
21845 }
21846
21847- current->mm->context.vdso = (void *)addr;
21848+ mm->context.vdso = addr + PAGE_SIZE;
21849
21850- ret = install_special_mapping(mm, addr, vdso_size,
21851+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
21852 VM_READ|VM_EXEC|
21853- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21854+ VM_MAYREAD|VM_MAYEXEC|
21855 VM_ALWAYSDUMP,
21856- vdso_pages);
21857+ &vsyscall_page);
21858 if (ret) {
21859- current->mm->context.vdso = NULL;
21860+ mm->context.vdso = 0;
21861 goto up_fail;
21862 }
21863
21864+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21865+ VM_READ|VM_EXEC|
21866+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21867+ VM_ALWAYSDUMP,
21868+ vdso_pages);
21869+ if (ret)
21870+ mm->context.vdso = 0;
21871+
21872 up_fail:
21873 up_write(&mm->mmap_sem);
21874 return ret;
21875 }
21876-
21877-static __init int vdso_setup(char *s)
21878-{
21879- vdso_enabled = simple_strtoul(s, NULL, 0);
21880- return 0;
21881-}
21882-__setup("vdso=", vdso_setup);
21883diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21884--- linux-3.0.4/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
21885+++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
21886@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21887
21888 struct shared_info xen_dummy_shared_info;
21889
21890-void *xen_initial_gdt;
21891-
21892 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21893 __read_mostly int xen_have_vector_callback;
21894 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21895@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21896 #endif
21897 };
21898
21899-static void xen_reboot(int reason)
21900+static __noreturn void xen_reboot(int reason)
21901 {
21902 struct sched_shutdown r = { .reason = reason };
21903
21904@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21905 BUG();
21906 }
21907
21908-static void xen_restart(char *msg)
21909+static __noreturn void xen_restart(char *msg)
21910 {
21911 xen_reboot(SHUTDOWN_reboot);
21912 }
21913
21914-static void xen_emergency_restart(void)
21915+static __noreturn void xen_emergency_restart(void)
21916 {
21917 xen_reboot(SHUTDOWN_reboot);
21918 }
21919
21920-static void xen_machine_halt(void)
21921+static __noreturn void xen_machine_halt(void)
21922 {
21923 xen_reboot(SHUTDOWN_poweroff);
21924 }
21925@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21926 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21927
21928 /* Work out if we support NX */
21929- x86_configure_nx();
21930+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21931+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21932+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21933+ unsigned l, h;
21934+
21935+ __supported_pte_mask |= _PAGE_NX;
21936+ rdmsr(MSR_EFER, l, h);
21937+ l |= EFER_NX;
21938+ wrmsr(MSR_EFER, l, h);
21939+ }
21940+#endif
21941
21942 xen_setup_features();
21943
21944@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21945
21946 machine_ops = xen_machine_ops;
21947
21948- /*
21949- * The only reliable way to retain the initial address of the
21950- * percpu gdt_page is to remember it here, so we can go and
21951- * mark it RW later, when the initial percpu area is freed.
21952- */
21953- xen_initial_gdt = &per_cpu(gdt_page, 0);
21954-
21955 xen_smp_init();
21956
21957 #ifdef CONFIG_ACPI_NUMA
21958diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21959--- linux-3.0.4/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
21960+++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
21961@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21962 convert_pfn_mfn(init_level4_pgt);
21963 convert_pfn_mfn(level3_ident_pgt);
21964 convert_pfn_mfn(level3_kernel_pgt);
21965+ convert_pfn_mfn(level3_vmalloc_pgt);
21966+ convert_pfn_mfn(level3_vmemmap_pgt);
21967
21968 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21969 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21970@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21971 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21972 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21973 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21974+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21975+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21976 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21977+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21978 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21979 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21980
21981@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21982 pv_mmu_ops.set_pud = xen_set_pud;
21983 #if PAGETABLE_LEVELS == 4
21984 pv_mmu_ops.set_pgd = xen_set_pgd;
21985+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21986 #endif
21987
21988 /* This will work as long as patching hasn't happened yet
21989@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21990 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21991 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21992 .set_pgd = xen_set_pgd_hyper,
21993+ .set_pgd_batched = xen_set_pgd_hyper,
21994
21995 .alloc_pud = xen_alloc_pmd_init,
21996 .release_pud = xen_release_pmd_init,
21997diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21998--- linux-3.0.4/arch/x86/xen/smp.c 2011-09-02 18:11:26.000000000 -0400
21999+++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
22000@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
22001 {
22002 BUG_ON(smp_processor_id() != 0);
22003 native_smp_prepare_boot_cpu();
22004-
22005- /* We've switched to the "real" per-cpu gdt, so make sure the
22006- old memory can be recycled */
22007- make_lowmem_page_readwrite(xen_initial_gdt);
22008-
22009 xen_filter_cpu_maps();
22010 xen_setup_vcpu_info_placement();
22011 }
22012@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
22013 gdt = get_cpu_gdt_table(cpu);
22014
22015 ctxt->flags = VGCF_IN_KERNEL;
22016- ctxt->user_regs.ds = __USER_DS;
22017- ctxt->user_regs.es = __USER_DS;
22018+ ctxt->user_regs.ds = __KERNEL_DS;
22019+ ctxt->user_regs.es = __KERNEL_DS;
22020 ctxt->user_regs.ss = __KERNEL_DS;
22021 #ifdef CONFIG_X86_32
22022 ctxt->user_regs.fs = __KERNEL_PERCPU;
22023- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22024+ savesegment(gs, ctxt->user_regs.gs);
22025 #else
22026 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22027 #endif
22028@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
22029 int rc;
22030
22031 per_cpu(current_task, cpu) = idle;
22032+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22033 #ifdef CONFIG_X86_32
22034 irq_ctx_init(cpu);
22035 #else
22036 clear_tsk_thread_flag(idle, TIF_FORK);
22037- per_cpu(kernel_stack, cpu) =
22038- (unsigned long)task_stack_page(idle) -
22039- KERNEL_STACK_OFFSET + THREAD_SIZE;
22040+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22041 #endif
22042 xen_setup_runstate_info(cpu);
22043 xen_setup_timer(cpu);
22044diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
22045--- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
22046+++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
22047@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22048 ESP_OFFSET=4 # bytes pushed onto stack
22049
22050 /*
22051- * Store vcpu_info pointer for easy access. Do it this way to
22052- * avoid having to reload %fs
22053+ * Store vcpu_info pointer for easy access.
22054 */
22055 #ifdef CONFIG_SMP
22056- GET_THREAD_INFO(%eax)
22057- movl TI_cpu(%eax), %eax
22058- movl __per_cpu_offset(,%eax,4), %eax
22059- mov xen_vcpu(%eax), %eax
22060+ push %fs
22061+ mov $(__KERNEL_PERCPU), %eax
22062+ mov %eax, %fs
22063+ mov PER_CPU_VAR(xen_vcpu), %eax
22064+ pop %fs
22065 #else
22066 movl xen_vcpu, %eax
22067 #endif
22068diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
22069--- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22070+++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22071@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22072 #ifdef CONFIG_X86_32
22073 mov %esi,xen_start_info
22074 mov $init_thread_union+THREAD_SIZE,%esp
22075+#ifdef CONFIG_SMP
22076+ movl $cpu_gdt_table,%edi
22077+ movl $__per_cpu_load,%eax
22078+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22079+ rorl $16,%eax
22080+ movb %al,__KERNEL_PERCPU + 4(%edi)
22081+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22082+ movl $__per_cpu_end - 1,%eax
22083+ subl $__per_cpu_start,%eax
22084+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22085+#endif
22086 #else
22087 mov %rsi,xen_start_info
22088 mov $init_thread_union+THREAD_SIZE,%rsp
22089diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
22090--- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22091+++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22092@@ -10,8 +10,6 @@
22093 extern const char xen_hypervisor_callback[];
22094 extern const char xen_failsafe_callback[];
22095
22096-extern void *xen_initial_gdt;
22097-
22098 struct trap_info;
22099 void xen_copy_trap_info(struct trap_info *traps);
22100
22101diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
22102--- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22103+++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22104@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22105 }
22106 EXPORT_SYMBOL(blk_iopoll_complete);
22107
22108-static void blk_iopoll_softirq(struct softirq_action *h)
22109+static void blk_iopoll_softirq(void)
22110 {
22111 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22112 int rearm = 0, budget = blk_iopoll_budget;
22113diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
22114--- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22115+++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22116@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22117 if (!len || !kbuf)
22118 return -EINVAL;
22119
22120- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22121+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22122 if (do_copy)
22123 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22124 else
22125diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
22126--- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22127+++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22128@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22129 * Softirq action handler - move entries to local list and loop over them
22130 * while passing them to the queue registered handler.
22131 */
22132-static void blk_done_softirq(struct softirq_action *h)
22133+static void blk_done_softirq(void)
22134 {
22135 struct list_head *cpu_list, local_list;
22136
22137diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
22138--- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22139+++ linux-3.0.4/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
22140@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22141 struct sg_io_v4 *hdr, struct bsg_device *bd,
22142 fmode_t has_write_perm)
22143 {
22144+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22145+ unsigned char *cmdptr;
22146+
22147 if (hdr->request_len > BLK_MAX_CDB) {
22148 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22149 if (!rq->cmd)
22150 return -ENOMEM;
22151- }
22152+ cmdptr = rq->cmd;
22153+ } else
22154+ cmdptr = tmpcmd;
22155
22156- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22157+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
22158 hdr->request_len))
22159 return -EFAULT;
22160
22161+ if (cmdptr != rq->cmd)
22162+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22163+
22164 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22165 if (blk_verify_command(rq->cmd, has_write_perm))
22166 return -EPERM;
22167diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
22168--- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22169+++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22170@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22171 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22172 struct sg_io_hdr *hdr, fmode_t mode)
22173 {
22174- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22175+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22176+ unsigned char *cmdptr;
22177+
22178+ if (rq->cmd != rq->__cmd)
22179+ cmdptr = rq->cmd;
22180+ else
22181+ cmdptr = tmpcmd;
22182+
22183+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22184 return -EFAULT;
22185+
22186+ if (cmdptr != rq->cmd)
22187+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22188+
22189 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22190 return -EPERM;
22191
22192@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22193 int err;
22194 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22195 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22196+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22197+ unsigned char *cmdptr;
22198
22199 if (!sic)
22200 return -EINVAL;
22201@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22202 */
22203 err = -EFAULT;
22204 rq->cmd_len = cmdlen;
22205- if (copy_from_user(rq->cmd, sic->data, cmdlen))
22206+
22207+ if (rq->cmd != rq->__cmd)
22208+ cmdptr = rq->cmd;
22209+ else
22210+ cmdptr = tmpcmd;
22211+
22212+ if (copy_from_user(cmdptr, sic->data, cmdlen))
22213 goto error;
22214
22215+ if (rq->cmd != cmdptr)
22216+ memcpy(rq->cmd, cmdptr, cmdlen);
22217+
22218 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22219 goto error;
22220
22221diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
22222--- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22223+++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22224@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22225
22226 struct cryptd_blkcipher_request_ctx {
22227 crypto_completion_t complete;
22228-};
22229+} __no_const;
22230
22231 struct cryptd_hash_ctx {
22232 struct crypto_shash *child;
22233@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22234
22235 struct cryptd_aead_request_ctx {
22236 crypto_completion_t complete;
22237-};
22238+} __no_const;
22239
22240 static void cryptd_queue_worker(struct work_struct *work);
22241
22242diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
22243--- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22244+++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22245@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22246 for (i = 0; i < 7; ++i)
22247 gf128mul_x_lle(&p[i + 1], &p[i]);
22248
22249- memset(r, 0, sizeof(r));
22250+ memset(r, 0, sizeof(*r));
22251 for (i = 0;;) {
22252 u8 ch = ((u8 *)b)[15 - i];
22253
22254@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22255 for (i = 0; i < 7; ++i)
22256 gf128mul_x_bbe(&p[i + 1], &p[i]);
22257
22258- memset(r, 0, sizeof(r));
22259+ memset(r, 0, sizeof(*r));
22260 for (i = 0;;) {
22261 u8 ch = ((u8 *)b)[i];
22262
22263diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
22264--- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22265+++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22266@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22267 u32 r0,r1,r2,r3,r4;
22268 int i;
22269
22270+ pax_track_stack();
22271+
22272 /* Copy key, add padding */
22273
22274 for (i = 0; i < keylen; ++i)
22275diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
22276--- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22277+++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
22278@@ -5,6 +5,7 @@
22279 *.cis
22280 *.cpio
22281 *.csp
22282+*.dbg
22283 *.dsp
22284 *.dvi
22285 *.elf
22286@@ -48,9 +49,11 @@
22287 *.tab.h
22288 *.tex
22289 *.ver
22290+*.vim
22291 *.xml
22292 *.xz
22293 *_MODULES
22294+*_reg_safe.h
22295 *_vga16.c
22296 *~
22297 \#*#
22298@@ -70,6 +73,7 @@ Kerntypes
22299 Module.markers
22300 Module.symvers
22301 PENDING
22302+PERF*
22303 SCCS
22304 System.map*
22305 TAGS
22306@@ -98,6 +102,8 @@ bzImage*
22307 capability_names.h
22308 capflags.c
22309 classlist.h*
22310+clut_vga16.c
22311+common-cmds.h
22312 comp*.log
22313 compile.h*
22314 conf
22315@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22316 gconf
22317 gconf.glade.h
22318 gen-devlist
22319+gen-kdb_cmds.c
22320 gen_crc32table
22321 gen_init_cpio
22322 generated
22323 genheaders
22324 genksyms
22325 *_gray256.c
22326+hash
22327 hpet_example
22328 hugepage-mmap
22329 hugepage-shm
22330@@ -146,7 +154,6 @@ int32.c
22331 int4.c
22332 int8.c
22333 kallsyms
22334-kconfig
22335 keywords.c
22336 ksym.c*
22337 ksym.h*
22338@@ -154,7 +161,6 @@ kxgettext
22339 lkc_defs.h
22340 lex.c
22341 lex.*.c
22342-linux
22343 logo_*.c
22344 logo_*_clut224.c
22345 logo_*_mono.c
22346@@ -174,6 +180,7 @@ mkboot
22347 mkbugboot
22348 mkcpustr
22349 mkdep
22350+mkpiggy
22351 mkprep
22352 mkregtable
22353 mktables
22354@@ -209,6 +216,7 @@ r300_reg_safe.h
22355 r420_reg_safe.h
22356 r600_reg_safe.h
22357 recordmcount
22358+regdb.c
22359 relocs
22360 rlim_names.h
22361 rn50_reg_safe.h
22362@@ -219,6 +227,7 @@ setup
22363 setup.bin
22364 setup.elf
22365 sImage
22366+slabinfo
22367 sm_tbl*
22368 split-include
22369 syscalltab.h
22370@@ -246,7 +255,9 @@ vmlinux
22371 vmlinux-*
22372 vmlinux.aout
22373 vmlinux.bin.all
22374+vmlinux.bin.bz2
22375 vmlinux.lds
22376+vmlinux.relocs
22377 vmlinuz
22378 voffset.h
22379 vsyscall.lds
22380@@ -254,6 +265,7 @@ vsyscall_32.lds
22381 wanxlfw.inc
22382 uImage
22383 unifdef
22384+utsrelease.h
22385 wakeup.bin
22386 wakeup.elf
22387 wakeup.lds
22388diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
22389--- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22390+++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22391@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22392 the specified number of seconds. This is to be used if
22393 your oopses keep scrolling off the screen.
22394
22395+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22396+ virtualization environments that don't cope well with the
22397+ expand down segment used by UDEREF on X86-32 or the frequent
22398+ page table updates on X86-64.
22399+
22400+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22401+
22402 pcbit= [HW,ISDN]
22403
22404 pcd. [PARIDE]
22405diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
22406--- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22407+++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22408@@ -38,12 +38,12 @@
22409 */
22410 u64 cper_next_record_id(void)
22411 {
22412- static atomic64_t seq;
22413+ static atomic64_unchecked_t seq;
22414
22415- if (!atomic64_read(&seq))
22416- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22417+ if (!atomic64_read_unchecked(&seq))
22418+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22419
22420- return atomic64_inc_return(&seq);
22421+ return atomic64_inc_return_unchecked(&seq);
22422 }
22423 EXPORT_SYMBOL_GPL(cper_next_record_id);
22424
22425diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
22426--- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22427+++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22428@@ -11,6 +11,7 @@
22429 #include <linux/kernel.h>
22430 #include <linux/acpi.h>
22431 #include <linux/debugfs.h>
22432+#include <asm/uaccess.h>
22433 #include "internal.h"
22434
22435 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22436@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22437 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22438 */
22439 unsigned int size = EC_SPACE_SIZE;
22440- u8 *data = (u8 *) buf;
22441+ u8 data;
22442 loff_t init_off = *off;
22443 int err = 0;
22444
22445@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22446 size = count;
22447
22448 while (size) {
22449- err = ec_read(*off, &data[*off - init_off]);
22450+ err = ec_read(*off, &data);
22451 if (err)
22452 return err;
22453+ if (put_user(data, &buf[*off - init_off]))
22454+ return -EFAULT;
22455 *off += 1;
22456 size--;
22457 }
22458@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
22459
22460 unsigned int size = count;
22461 loff_t init_off = *off;
22462- u8 *data = (u8 *) buf;
22463 int err = 0;
22464
22465 if (*off >= EC_SPACE_SIZE)
22466@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
22467 }
22468
22469 while (size) {
22470- u8 byte_write = data[*off - init_off];
22471+ u8 byte_write;
22472+ if (get_user(byte_write, &buf[*off - init_off]))
22473+ return -EFAULT;
22474 err = ec_write(*off, byte_write);
22475 if (err)
22476 return err;
22477diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
22478--- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
22479+++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
22480@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22481 size_t count, loff_t * ppos)
22482 {
22483 struct list_head *node, *next;
22484- char strbuf[5];
22485- char str[5] = "";
22486- unsigned int len = count;
22487-
22488- if (len > 4)
22489- len = 4;
22490- if (len < 0)
22491- return -EFAULT;
22492+ char strbuf[5] = {0};
22493
22494- if (copy_from_user(strbuf, buffer, len))
22495+ if (count > 4)
22496+ count = 4;
22497+ if (copy_from_user(strbuf, buffer, count))
22498 return -EFAULT;
22499- strbuf[len] = '\0';
22500- sscanf(strbuf, "%s", str);
22501+ strbuf[count] = '\0';
22502
22503 mutex_lock(&acpi_device_lock);
22504 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22505@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22506 if (!dev->wakeup.flags.valid)
22507 continue;
22508
22509- if (!strncmp(dev->pnp.bus_id, str, 4)) {
22510+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22511 if (device_can_wakeup(&dev->dev)) {
22512 bool enable = !device_may_wakeup(&dev->dev);
22513 device_set_wakeup_enable(&dev->dev, enable);
22514diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
22515--- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
22516+++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
22517@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22518 return 0;
22519 #endif
22520
22521- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22522+ BUG_ON(pr->id >= nr_cpu_ids);
22523
22524 /*
22525 * Buggy BIOS check
22526diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
22527--- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
22528+++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
22529@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
22530 struct ata_port *ap;
22531 unsigned int tag;
22532
22533- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22534+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22535 ap = qc->ap;
22536
22537 qc->flags = 0;
22538@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
22539 struct ata_port *ap;
22540 struct ata_link *link;
22541
22542- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22543+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22544 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22545 ap = qc->ap;
22546 link = qc->dev->link;
22547@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
22548 return;
22549
22550 spin_lock(&lock);
22551+ pax_open_kernel();
22552
22553 for (cur = ops->inherits; cur; cur = cur->inherits) {
22554 void **inherit = (void **)cur;
22555@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22556 if (IS_ERR(*pp))
22557 *pp = NULL;
22558
22559- ops->inherits = NULL;
22560+ *(struct ata_port_operations **)&ops->inherits = NULL;
22561
22562+ pax_close_kernel();
22563 spin_unlock(&lock);
22564 }
22565
22566diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22567--- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22568+++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22569@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22570 {
22571 struct ata_link *link;
22572
22573+ pax_track_stack();
22574+
22575 ata_for_each_link(link, ap, HOST_FIRST)
22576 ata_eh_link_report(link);
22577 }
22578diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22579--- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22580+++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22581@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22582 /* Handle platform specific quirks */
22583 if (pdata->quirk) {
22584 if (pdata->quirk & CF_BROKEN_PIO) {
22585- ap->ops->set_piomode = NULL;
22586+ pax_open_kernel();
22587+ *(void **)&ap->ops->set_piomode = NULL;
22588+ pax_close_kernel();
22589 ap->pio_mask = 0;
22590 }
22591 if (pdata->quirk & CF_BROKEN_MWDMA)
22592diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22593--- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22594+++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22595@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22596 vcc->pop(vcc, skb);
22597 else
22598 dev_kfree_skb_any(skb);
22599- atomic_inc(&vcc->stats->tx);
22600+ atomic_inc_unchecked(&vcc->stats->tx);
22601
22602 return 0;
22603 }
22604diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22605--- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22606+++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22607@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22608 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22609
22610 // VC layer stats
22611- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22612+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22613
22614 // free the descriptor
22615 kfree (tx_descr);
22616@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22617 dump_skb ("<<<", vc, skb);
22618
22619 // VC layer stats
22620- atomic_inc(&atm_vcc->stats->rx);
22621+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22622 __net_timestamp(skb);
22623 // end of our responsibility
22624 atm_vcc->push (atm_vcc, skb);
22625@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22626 } else {
22627 PRINTK (KERN_INFO, "dropped over-size frame");
22628 // should we count this?
22629- atomic_inc(&atm_vcc->stats->rx_drop);
22630+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22631 }
22632
22633 } else {
22634@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22635 }
22636
22637 if (check_area (skb->data, skb->len)) {
22638- atomic_inc(&atm_vcc->stats->tx_err);
22639+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22640 return -ENOMEM; // ?
22641 }
22642
22643diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22644--- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22645+++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22646@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22647 if (vcc->pop) vcc->pop(vcc,skb);
22648 else dev_kfree_skb(skb);
22649 if (dev_data) return 0;
22650- atomic_inc(&vcc->stats->tx_err);
22651+ atomic_inc_unchecked(&vcc->stats->tx_err);
22652 return -ENOLINK;
22653 }
22654 size = skb->len+sizeof(struct atmtcp_hdr);
22655@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22656 if (!new_skb) {
22657 if (vcc->pop) vcc->pop(vcc,skb);
22658 else dev_kfree_skb(skb);
22659- atomic_inc(&vcc->stats->tx_err);
22660+ atomic_inc_unchecked(&vcc->stats->tx_err);
22661 return -ENOBUFS;
22662 }
22663 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22664@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22665 if (vcc->pop) vcc->pop(vcc,skb);
22666 else dev_kfree_skb(skb);
22667 out_vcc->push(out_vcc,new_skb);
22668- atomic_inc(&vcc->stats->tx);
22669- atomic_inc(&out_vcc->stats->rx);
22670+ atomic_inc_unchecked(&vcc->stats->tx);
22671+ atomic_inc_unchecked(&out_vcc->stats->rx);
22672 return 0;
22673 }
22674
22675@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22676 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22677 read_unlock(&vcc_sklist_lock);
22678 if (!out_vcc) {
22679- atomic_inc(&vcc->stats->tx_err);
22680+ atomic_inc_unchecked(&vcc->stats->tx_err);
22681 goto done;
22682 }
22683 skb_pull(skb,sizeof(struct atmtcp_hdr));
22684@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22685 __net_timestamp(new_skb);
22686 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22687 out_vcc->push(out_vcc,new_skb);
22688- atomic_inc(&vcc->stats->tx);
22689- atomic_inc(&out_vcc->stats->rx);
22690+ atomic_inc_unchecked(&vcc->stats->tx);
22691+ atomic_inc_unchecked(&out_vcc->stats->rx);
22692 done:
22693 if (vcc->pop) vcc->pop(vcc,skb);
22694 else dev_kfree_skb(skb);
22695diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22696--- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22697+++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22698@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22699 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22700 vcc->dev->number);
22701 length = 0;
22702- atomic_inc(&vcc->stats->rx_err);
22703+ atomic_inc_unchecked(&vcc->stats->rx_err);
22704 }
22705 else {
22706 length = ATM_CELL_SIZE-1; /* no HEC */
22707@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22708 size);
22709 }
22710 eff = length = 0;
22711- atomic_inc(&vcc->stats->rx_err);
22712+ atomic_inc_unchecked(&vcc->stats->rx_err);
22713 }
22714 else {
22715 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22716@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22717 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22718 vcc->dev->number,vcc->vci,length,size << 2,descr);
22719 length = eff = 0;
22720- atomic_inc(&vcc->stats->rx_err);
22721+ atomic_inc_unchecked(&vcc->stats->rx_err);
22722 }
22723 }
22724 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22725@@ -771,7 +771,7 @@ rx_dequeued++;
22726 vcc->push(vcc,skb);
22727 pushed++;
22728 }
22729- atomic_inc(&vcc->stats->rx);
22730+ atomic_inc_unchecked(&vcc->stats->rx);
22731 }
22732 wake_up(&eni_dev->rx_wait);
22733 }
22734@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22735 PCI_DMA_TODEVICE);
22736 if (vcc->pop) vcc->pop(vcc,skb);
22737 else dev_kfree_skb_irq(skb);
22738- atomic_inc(&vcc->stats->tx);
22739+ atomic_inc_unchecked(&vcc->stats->tx);
22740 wake_up(&eni_dev->tx_wait);
22741 dma_complete++;
22742 }
22743diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22744--- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22745+++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22746@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22747 }
22748 }
22749
22750- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22751+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22752
22753 fs_dprintk (FS_DEBUG_TXMEM, "i");
22754 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22755@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22756 #endif
22757 skb_put (skb, qe->p1 & 0xffff);
22758 ATM_SKB(skb)->vcc = atm_vcc;
22759- atomic_inc(&atm_vcc->stats->rx);
22760+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22761 __net_timestamp(skb);
22762 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22763 atm_vcc->push (atm_vcc, skb);
22764@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22765 kfree (pe);
22766 }
22767 if (atm_vcc)
22768- atomic_inc(&atm_vcc->stats->rx_drop);
22769+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22770 break;
22771 case 0x1f: /* Reassembly abort: no buffers. */
22772 /* Silently increment error counter. */
22773 if (atm_vcc)
22774- atomic_inc(&atm_vcc->stats->rx_drop);
22775+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22776 break;
22777 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22778 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22779diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22780--- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22781+++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22782@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22783 #endif
22784 /* check error condition */
22785 if (*entry->status & STATUS_ERROR)
22786- atomic_inc(&vcc->stats->tx_err);
22787+ atomic_inc_unchecked(&vcc->stats->tx_err);
22788 else
22789- atomic_inc(&vcc->stats->tx);
22790+ atomic_inc_unchecked(&vcc->stats->tx);
22791 }
22792 }
22793
22794@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22795 if (skb == NULL) {
22796 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22797
22798- atomic_inc(&vcc->stats->rx_drop);
22799+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22800 return -ENOMEM;
22801 }
22802
22803@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22804
22805 dev_kfree_skb_any(skb);
22806
22807- atomic_inc(&vcc->stats->rx_drop);
22808+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22809 return -ENOMEM;
22810 }
22811
22812 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22813
22814 vcc->push(vcc, skb);
22815- atomic_inc(&vcc->stats->rx);
22816+ atomic_inc_unchecked(&vcc->stats->rx);
22817
22818 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22819
22820@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22821 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22822 fore200e->atm_dev->number,
22823 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22824- atomic_inc(&vcc->stats->rx_err);
22825+ atomic_inc_unchecked(&vcc->stats->rx_err);
22826 }
22827 }
22828
22829@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22830 goto retry_here;
22831 }
22832
22833- atomic_inc(&vcc->stats->tx_err);
22834+ atomic_inc_unchecked(&vcc->stats->tx_err);
22835
22836 fore200e->tx_sat++;
22837 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22838diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22839--- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22840+++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22841@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22842
22843 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22844 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22845- atomic_inc(&vcc->stats->rx_drop);
22846+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22847 goto return_host_buffers;
22848 }
22849
22850@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22851 RBRQ_LEN_ERR(he_dev->rbrq_head)
22852 ? "LEN_ERR" : "",
22853 vcc->vpi, vcc->vci);
22854- atomic_inc(&vcc->stats->rx_err);
22855+ atomic_inc_unchecked(&vcc->stats->rx_err);
22856 goto return_host_buffers;
22857 }
22858
22859@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22860 vcc->push(vcc, skb);
22861 spin_lock(&he_dev->global_lock);
22862
22863- atomic_inc(&vcc->stats->rx);
22864+ atomic_inc_unchecked(&vcc->stats->rx);
22865
22866 return_host_buffers:
22867 ++pdus_assembled;
22868@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22869 tpd->vcc->pop(tpd->vcc, tpd->skb);
22870 else
22871 dev_kfree_skb_any(tpd->skb);
22872- atomic_inc(&tpd->vcc->stats->tx_err);
22873+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22874 }
22875 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22876 return;
22877@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22878 vcc->pop(vcc, skb);
22879 else
22880 dev_kfree_skb_any(skb);
22881- atomic_inc(&vcc->stats->tx_err);
22882+ atomic_inc_unchecked(&vcc->stats->tx_err);
22883 return -EINVAL;
22884 }
22885
22886@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22887 vcc->pop(vcc, skb);
22888 else
22889 dev_kfree_skb_any(skb);
22890- atomic_inc(&vcc->stats->tx_err);
22891+ atomic_inc_unchecked(&vcc->stats->tx_err);
22892 return -EINVAL;
22893 }
22894 #endif
22895@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22896 vcc->pop(vcc, skb);
22897 else
22898 dev_kfree_skb_any(skb);
22899- atomic_inc(&vcc->stats->tx_err);
22900+ atomic_inc_unchecked(&vcc->stats->tx_err);
22901 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22902 return -ENOMEM;
22903 }
22904@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22905 vcc->pop(vcc, skb);
22906 else
22907 dev_kfree_skb_any(skb);
22908- atomic_inc(&vcc->stats->tx_err);
22909+ atomic_inc_unchecked(&vcc->stats->tx_err);
22910 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22911 return -ENOMEM;
22912 }
22913@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22914 __enqueue_tpd(he_dev, tpd, cid);
22915 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22916
22917- atomic_inc(&vcc->stats->tx);
22918+ atomic_inc_unchecked(&vcc->stats->tx);
22919
22920 return 0;
22921 }
22922diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22923--- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22924+++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22925@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22926 {
22927 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22928 // VC layer stats
22929- atomic_inc(&vcc->stats->rx);
22930+ atomic_inc_unchecked(&vcc->stats->rx);
22931 __net_timestamp(skb);
22932 // end of our responsibility
22933 vcc->push (vcc, skb);
22934@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22935 dev->tx_iovec = NULL;
22936
22937 // VC layer stats
22938- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22939+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22940
22941 // free the skb
22942 hrz_kfree_skb (skb);
22943diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22944--- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22945+++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22946@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22947 else
22948 dev_kfree_skb(skb);
22949
22950- atomic_inc(&vcc->stats->tx);
22951+ atomic_inc_unchecked(&vcc->stats->tx);
22952 }
22953
22954 atomic_dec(&scq->used);
22955@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22956 if ((sb = dev_alloc_skb(64)) == NULL) {
22957 printk("%s: Can't allocate buffers for aal0.\n",
22958 card->name);
22959- atomic_add(i, &vcc->stats->rx_drop);
22960+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22961 break;
22962 }
22963 if (!atm_charge(vcc, sb->truesize)) {
22964 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22965 card->name);
22966- atomic_add(i - 1, &vcc->stats->rx_drop);
22967+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22968 dev_kfree_skb(sb);
22969 break;
22970 }
22971@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22972 ATM_SKB(sb)->vcc = vcc;
22973 __net_timestamp(sb);
22974 vcc->push(vcc, sb);
22975- atomic_inc(&vcc->stats->rx);
22976+ atomic_inc_unchecked(&vcc->stats->rx);
22977
22978 cell += ATM_CELL_PAYLOAD;
22979 }
22980@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22981 "(CDC: %08x)\n",
22982 card->name, len, rpp->len, readl(SAR_REG_CDC));
22983 recycle_rx_pool_skb(card, rpp);
22984- atomic_inc(&vcc->stats->rx_err);
22985+ atomic_inc_unchecked(&vcc->stats->rx_err);
22986 return;
22987 }
22988 if (stat & SAR_RSQE_CRC) {
22989 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22990 recycle_rx_pool_skb(card, rpp);
22991- atomic_inc(&vcc->stats->rx_err);
22992+ atomic_inc_unchecked(&vcc->stats->rx_err);
22993 return;
22994 }
22995 if (skb_queue_len(&rpp->queue) > 1) {
22996@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22997 RXPRINTK("%s: Can't alloc RX skb.\n",
22998 card->name);
22999 recycle_rx_pool_skb(card, rpp);
23000- atomic_inc(&vcc->stats->rx_err);
23001+ atomic_inc_unchecked(&vcc->stats->rx_err);
23002 return;
23003 }
23004 if (!atm_charge(vcc, skb->truesize)) {
23005@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23006 __net_timestamp(skb);
23007
23008 vcc->push(vcc, skb);
23009- atomic_inc(&vcc->stats->rx);
23010+ atomic_inc_unchecked(&vcc->stats->rx);
23011
23012 return;
23013 }
23014@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23015 __net_timestamp(skb);
23016
23017 vcc->push(vcc, skb);
23018- atomic_inc(&vcc->stats->rx);
23019+ atomic_inc_unchecked(&vcc->stats->rx);
23020
23021 if (skb->truesize > SAR_FB_SIZE_3)
23022 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23023@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23024 if (vcc->qos.aal != ATM_AAL0) {
23025 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23026 card->name, vpi, vci);
23027- atomic_inc(&vcc->stats->rx_drop);
23028+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23029 goto drop;
23030 }
23031
23032 if ((sb = dev_alloc_skb(64)) == NULL) {
23033 printk("%s: Can't allocate buffers for AAL0.\n",
23034 card->name);
23035- atomic_inc(&vcc->stats->rx_err);
23036+ atomic_inc_unchecked(&vcc->stats->rx_err);
23037 goto drop;
23038 }
23039
23040@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23041 ATM_SKB(sb)->vcc = vcc;
23042 __net_timestamp(sb);
23043 vcc->push(vcc, sb);
23044- atomic_inc(&vcc->stats->rx);
23045+ atomic_inc_unchecked(&vcc->stats->rx);
23046
23047 drop:
23048 skb_pull(queue, 64);
23049@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23050
23051 if (vc == NULL) {
23052 printk("%s: NULL connection in send().\n", card->name);
23053- atomic_inc(&vcc->stats->tx_err);
23054+ atomic_inc_unchecked(&vcc->stats->tx_err);
23055 dev_kfree_skb(skb);
23056 return -EINVAL;
23057 }
23058 if (!test_bit(VCF_TX, &vc->flags)) {
23059 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23060- atomic_inc(&vcc->stats->tx_err);
23061+ atomic_inc_unchecked(&vcc->stats->tx_err);
23062 dev_kfree_skb(skb);
23063 return -EINVAL;
23064 }
23065@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23066 break;
23067 default:
23068 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23069- atomic_inc(&vcc->stats->tx_err);
23070+ atomic_inc_unchecked(&vcc->stats->tx_err);
23071 dev_kfree_skb(skb);
23072 return -EINVAL;
23073 }
23074
23075 if (skb_shinfo(skb)->nr_frags != 0) {
23076 printk("%s: No scatter-gather yet.\n", card->name);
23077- atomic_inc(&vcc->stats->tx_err);
23078+ atomic_inc_unchecked(&vcc->stats->tx_err);
23079 dev_kfree_skb(skb);
23080 return -EINVAL;
23081 }
23082@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23083
23084 err = queue_skb(card, vc, skb, oam);
23085 if (err) {
23086- atomic_inc(&vcc->stats->tx_err);
23087+ atomic_inc_unchecked(&vcc->stats->tx_err);
23088 dev_kfree_skb(skb);
23089 return err;
23090 }
23091@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23092 skb = dev_alloc_skb(64);
23093 if (!skb) {
23094 printk("%s: Out of memory in send_oam().\n", card->name);
23095- atomic_inc(&vcc->stats->tx_err);
23096+ atomic_inc_unchecked(&vcc->stats->tx_err);
23097 return -ENOMEM;
23098 }
23099 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23100diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
23101--- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23102+++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23103@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23104 status = (u_short) (buf_desc_ptr->desc_mode);
23105 if (status & (RX_CER | RX_PTE | RX_OFL))
23106 {
23107- atomic_inc(&vcc->stats->rx_err);
23108+ atomic_inc_unchecked(&vcc->stats->rx_err);
23109 IF_ERR(printk("IA: bad packet, dropping it");)
23110 if (status & RX_CER) {
23111 IF_ERR(printk(" cause: packet CRC error\n");)
23112@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23113 len = dma_addr - buf_addr;
23114 if (len > iadev->rx_buf_sz) {
23115 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23116- atomic_inc(&vcc->stats->rx_err);
23117+ atomic_inc_unchecked(&vcc->stats->rx_err);
23118 goto out_free_desc;
23119 }
23120
23121@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23122 ia_vcc = INPH_IA_VCC(vcc);
23123 if (ia_vcc == NULL)
23124 {
23125- atomic_inc(&vcc->stats->rx_err);
23126+ atomic_inc_unchecked(&vcc->stats->rx_err);
23127 dev_kfree_skb_any(skb);
23128 atm_return(vcc, atm_guess_pdu2truesize(len));
23129 goto INCR_DLE;
23130@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23131 if ((length > iadev->rx_buf_sz) || (length >
23132 (skb->len - sizeof(struct cpcs_trailer))))
23133 {
23134- atomic_inc(&vcc->stats->rx_err);
23135+ atomic_inc_unchecked(&vcc->stats->rx_err);
23136 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23137 length, skb->len);)
23138 dev_kfree_skb_any(skb);
23139@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23140
23141 IF_RX(printk("rx_dle_intr: skb push");)
23142 vcc->push(vcc,skb);
23143- atomic_inc(&vcc->stats->rx);
23144+ atomic_inc_unchecked(&vcc->stats->rx);
23145 iadev->rx_pkt_cnt++;
23146 }
23147 INCR_DLE:
23148@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23149 {
23150 struct k_sonet_stats *stats;
23151 stats = &PRIV(_ia_dev[board])->sonet_stats;
23152- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23153- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23154- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23155- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23156- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23157- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23158- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23159- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23160- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23161+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23162+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23163+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23164+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23165+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23166+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23167+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23168+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23169+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23170 }
23171 ia_cmds.status = 0;
23172 break;
23173@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23174 if ((desc == 0) || (desc > iadev->num_tx_desc))
23175 {
23176 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23177- atomic_inc(&vcc->stats->tx);
23178+ atomic_inc_unchecked(&vcc->stats->tx);
23179 if (vcc->pop)
23180 vcc->pop(vcc, skb);
23181 else
23182@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23183 ATM_DESC(skb) = vcc->vci;
23184 skb_queue_tail(&iadev->tx_dma_q, skb);
23185
23186- atomic_inc(&vcc->stats->tx);
23187+ atomic_inc_unchecked(&vcc->stats->tx);
23188 iadev->tx_pkt_cnt++;
23189 /* Increment transaction counter */
23190 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23191
23192 #if 0
23193 /* add flow control logic */
23194- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23195+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23196 if (iavcc->vc_desc_cnt > 10) {
23197 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23198 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23199diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
23200--- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23201+++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23202@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23203 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23204 lanai_endtx(lanai, lvcc);
23205 lanai_free_skb(lvcc->tx.atmvcc, skb);
23206- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23207+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23208 }
23209
23210 /* Try to fill the buffer - don't call unless there is backlog */
23211@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23212 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23213 __net_timestamp(skb);
23214 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23215- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23216+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23217 out:
23218 lvcc->rx.buf.ptr = end;
23219 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23220@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23221 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23222 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23223 lanai->stats.service_rxnotaal5++;
23224- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23225+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23226 return 0;
23227 }
23228 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23229@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23230 int bytes;
23231 read_unlock(&vcc_sklist_lock);
23232 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23233- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23234+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23235 lvcc->stats.x.aal5.service_trash++;
23236 bytes = (SERVICE_GET_END(s) * 16) -
23237 (((unsigned long) lvcc->rx.buf.ptr) -
23238@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23239 }
23240 if (s & SERVICE_STREAM) {
23241 read_unlock(&vcc_sklist_lock);
23242- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23243+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23244 lvcc->stats.x.aal5.service_stream++;
23245 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23246 "PDU on VCI %d!\n", lanai->number, vci);
23247@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23248 return 0;
23249 }
23250 DPRINTK("got rx crc error on vci %d\n", vci);
23251- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23252+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23253 lvcc->stats.x.aal5.service_rxcrc++;
23254 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23255 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23256diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
23257--- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23258+++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23259@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23260 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23261 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23262 card->index);
23263- atomic_inc(&vcc->stats->tx_err);
23264+ atomic_inc_unchecked(&vcc->stats->tx_err);
23265 dev_kfree_skb_any(skb);
23266 return -EINVAL;
23267 }
23268@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23269 if (!vc->tx) {
23270 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23271 card->index);
23272- atomic_inc(&vcc->stats->tx_err);
23273+ atomic_inc_unchecked(&vcc->stats->tx_err);
23274 dev_kfree_skb_any(skb);
23275 return -EINVAL;
23276 }
23277@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23278 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23279 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23280 card->index);
23281- atomic_inc(&vcc->stats->tx_err);
23282+ atomic_inc_unchecked(&vcc->stats->tx_err);
23283 dev_kfree_skb_any(skb);
23284 return -EINVAL;
23285 }
23286
23287 if (skb_shinfo(skb)->nr_frags != 0) {
23288 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23289- atomic_inc(&vcc->stats->tx_err);
23290+ atomic_inc_unchecked(&vcc->stats->tx_err);
23291 dev_kfree_skb_any(skb);
23292 return -EINVAL;
23293 }
23294@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23295 }
23296
23297 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23298- atomic_inc(&vcc->stats->tx_err);
23299+ atomic_inc_unchecked(&vcc->stats->tx_err);
23300 dev_kfree_skb_any(skb);
23301 return -EIO;
23302 }
23303- atomic_inc(&vcc->stats->tx);
23304+ atomic_inc_unchecked(&vcc->stats->tx);
23305
23306 return 0;
23307 }
23308@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23309 printk
23310 ("nicstar%d: Can't allocate buffers for aal0.\n",
23311 card->index);
23312- atomic_add(i, &vcc->stats->rx_drop);
23313+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23314 break;
23315 }
23316 if (!atm_charge(vcc, sb->truesize)) {
23317 RXPRINTK
23318 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23319 card->index);
23320- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23321+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23322 dev_kfree_skb_any(sb);
23323 break;
23324 }
23325@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23326 ATM_SKB(sb)->vcc = vcc;
23327 __net_timestamp(sb);
23328 vcc->push(vcc, sb);
23329- atomic_inc(&vcc->stats->rx);
23330+ atomic_inc_unchecked(&vcc->stats->rx);
23331 cell += ATM_CELL_PAYLOAD;
23332 }
23333
23334@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23335 if (iovb == NULL) {
23336 printk("nicstar%d: Out of iovec buffers.\n",
23337 card->index);
23338- atomic_inc(&vcc->stats->rx_drop);
23339+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23340 recycle_rx_buf(card, skb);
23341 return;
23342 }
23343@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23344 small or large buffer itself. */
23345 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23346 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23347- atomic_inc(&vcc->stats->rx_err);
23348+ atomic_inc_unchecked(&vcc->stats->rx_err);
23349 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23350 NS_MAX_IOVECS);
23351 NS_PRV_IOVCNT(iovb) = 0;
23352@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23353 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23354 card->index);
23355 which_list(card, skb);
23356- atomic_inc(&vcc->stats->rx_err);
23357+ atomic_inc_unchecked(&vcc->stats->rx_err);
23358 recycle_rx_buf(card, skb);
23359 vc->rx_iov = NULL;
23360 recycle_iov_buf(card, iovb);
23361@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23362 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23363 card->index);
23364 which_list(card, skb);
23365- atomic_inc(&vcc->stats->rx_err);
23366+ atomic_inc_unchecked(&vcc->stats->rx_err);
23367 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23368 NS_PRV_IOVCNT(iovb));
23369 vc->rx_iov = NULL;
23370@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23371 printk(" - PDU size mismatch.\n");
23372 else
23373 printk(".\n");
23374- atomic_inc(&vcc->stats->rx_err);
23375+ atomic_inc_unchecked(&vcc->stats->rx_err);
23376 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23377 NS_PRV_IOVCNT(iovb));
23378 vc->rx_iov = NULL;
23379@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23380 /* skb points to a small buffer */
23381 if (!atm_charge(vcc, skb->truesize)) {
23382 push_rxbufs(card, skb);
23383- atomic_inc(&vcc->stats->rx_drop);
23384+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23385 } else {
23386 skb_put(skb, len);
23387 dequeue_sm_buf(card, skb);
23388@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23389 ATM_SKB(skb)->vcc = vcc;
23390 __net_timestamp(skb);
23391 vcc->push(vcc, skb);
23392- atomic_inc(&vcc->stats->rx);
23393+ atomic_inc_unchecked(&vcc->stats->rx);
23394 }
23395 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23396 struct sk_buff *sb;
23397@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23398 if (len <= NS_SMBUFSIZE) {
23399 if (!atm_charge(vcc, sb->truesize)) {
23400 push_rxbufs(card, sb);
23401- atomic_inc(&vcc->stats->rx_drop);
23402+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23403 } else {
23404 skb_put(sb, len);
23405 dequeue_sm_buf(card, sb);
23406@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23407 ATM_SKB(sb)->vcc = vcc;
23408 __net_timestamp(sb);
23409 vcc->push(vcc, sb);
23410- atomic_inc(&vcc->stats->rx);
23411+ atomic_inc_unchecked(&vcc->stats->rx);
23412 }
23413
23414 push_rxbufs(card, skb);
23415@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23416
23417 if (!atm_charge(vcc, skb->truesize)) {
23418 push_rxbufs(card, skb);
23419- atomic_inc(&vcc->stats->rx_drop);
23420+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23421 } else {
23422 dequeue_lg_buf(card, skb);
23423 #ifdef NS_USE_DESTRUCTORS
23424@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23425 ATM_SKB(skb)->vcc = vcc;
23426 __net_timestamp(skb);
23427 vcc->push(vcc, skb);
23428- atomic_inc(&vcc->stats->rx);
23429+ atomic_inc_unchecked(&vcc->stats->rx);
23430 }
23431
23432 push_rxbufs(card, sb);
23433@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23434 printk
23435 ("nicstar%d: Out of huge buffers.\n",
23436 card->index);
23437- atomic_inc(&vcc->stats->rx_drop);
23438+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23439 recycle_iovec_rx_bufs(card,
23440 (struct iovec *)
23441 iovb->data,
23442@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23443 card->hbpool.count++;
23444 } else
23445 dev_kfree_skb_any(hb);
23446- atomic_inc(&vcc->stats->rx_drop);
23447+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23448 } else {
23449 /* Copy the small buffer to the huge buffer */
23450 sb = (struct sk_buff *)iov->iov_base;
23451@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23452 #endif /* NS_USE_DESTRUCTORS */
23453 __net_timestamp(hb);
23454 vcc->push(vcc, hb);
23455- atomic_inc(&vcc->stats->rx);
23456+ atomic_inc_unchecked(&vcc->stats->rx);
23457 }
23458 }
23459
23460diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
23461--- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
23462+++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
23463@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
23464 }
23465 atm_charge(vcc, skb->truesize);
23466 vcc->push(vcc, skb);
23467- atomic_inc(&vcc->stats->rx);
23468+ atomic_inc_unchecked(&vcc->stats->rx);
23469 break;
23470
23471 case PKT_STATUS:
23472@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
23473 char msg[500];
23474 char item[10];
23475
23476+ pax_track_stack();
23477+
23478 len = buf->len;
23479 for (i = 0; i < len; i++){
23480 if(i % 8 == 0)
23481@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
23482 vcc = SKB_CB(oldskb)->vcc;
23483
23484 if (vcc) {
23485- atomic_inc(&vcc->stats->tx);
23486+ atomic_inc_unchecked(&vcc->stats->tx);
23487 solos_pop(vcc, oldskb);
23488 } else
23489 dev_kfree_skb_irq(oldskb);
23490diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
23491--- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
23492+++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
23493@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23494
23495
23496 #define ADD_LIMITED(s,v) \
23497- atomic_add((v),&stats->s); \
23498- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23499+ atomic_add_unchecked((v),&stats->s); \
23500+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23501
23502
23503 static void suni_hz(unsigned long from_timer)
23504diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
23505--- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
23506+++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
23507@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23508 struct sonet_stats tmp;
23509 int error = 0;
23510
23511- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23512+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23513 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23514 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23515 if (zero && !error) {
23516@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23517
23518
23519 #define ADD_LIMITED(s,v) \
23520- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23521- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23522- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23523+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23524+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23525+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23526
23527
23528 static void stat_event(struct atm_dev *dev)
23529@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23530 if (reason & uPD98402_INT_PFM) stat_event(dev);
23531 if (reason & uPD98402_INT_PCO) {
23532 (void) GET(PCOCR); /* clear interrupt cause */
23533- atomic_add(GET(HECCT),
23534+ atomic_add_unchecked(GET(HECCT),
23535 &PRIV(dev)->sonet_stats.uncorr_hcs);
23536 }
23537 if ((reason & uPD98402_INT_RFO) &&
23538@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23539 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23540 uPD98402_INT_LOS),PIMR); /* enable them */
23541 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23542- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23543- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23544- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23545+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23546+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23547+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23548 return 0;
23549 }
23550
23551diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23552--- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23553+++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23554@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23555 }
23556 if (!size) {
23557 dev_kfree_skb_irq(skb);
23558- if (vcc) atomic_inc(&vcc->stats->rx_err);
23559+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23560 continue;
23561 }
23562 if (!atm_charge(vcc,skb->truesize)) {
23563@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23564 skb->len = size;
23565 ATM_SKB(skb)->vcc = vcc;
23566 vcc->push(vcc,skb);
23567- atomic_inc(&vcc->stats->rx);
23568+ atomic_inc_unchecked(&vcc->stats->rx);
23569 }
23570 zout(pos & 0xffff,MTA(mbx));
23571 #if 0 /* probably a stupid idea */
23572@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23573 skb_queue_head(&zatm_vcc->backlog,skb);
23574 break;
23575 }
23576- atomic_inc(&vcc->stats->tx);
23577+ atomic_inc_unchecked(&vcc->stats->tx);
23578 wake_up(&zatm_vcc->tx_wait);
23579 }
23580
23581diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23582--- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23583+++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23584@@ -29,14 +29,14 @@ bool events_check_enabled;
23585 * They need to be modified together atomically, so it's better to use one
23586 * atomic variable to hold them both.
23587 */
23588-static atomic_t combined_event_count = ATOMIC_INIT(0);
23589+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23590
23591 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23592 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23593
23594 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23595 {
23596- unsigned int comb = atomic_read(&combined_event_count);
23597+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23598
23599 *cnt = (comb >> IN_PROGRESS_BITS);
23600 *inpr = comb & MAX_IN_PROGRESS;
23601@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23602 ws->last_time = ktime_get();
23603
23604 /* Increment the counter of events in progress. */
23605- atomic_inc(&combined_event_count);
23606+ atomic_inc_unchecked(&combined_event_count);
23607 }
23608
23609 /**
23610@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23611 * Increment the counter of registered wakeup events and decrement the
23612 * couter of wakeup events in progress simultaneously.
23613 */
23614- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23615+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23616 }
23617
23618 /**
23619diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23620--- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23621+++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23622@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23623 int err;
23624 u32 cp;
23625
23626+ memset(&arg64, 0, sizeof(arg64));
23627+
23628 err = 0;
23629 err |=
23630 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23631@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23632 while (!list_empty(&h->reqQ)) {
23633 c = list_entry(h->reqQ.next, CommandList_struct, list);
23634 /* can't do anything if fifo is full */
23635- if ((h->access.fifo_full(h))) {
23636+ if ((h->access->fifo_full(h))) {
23637 dev_warn(&h->pdev->dev, "fifo full\n");
23638 break;
23639 }
23640@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23641 h->Qdepth--;
23642
23643 /* Tell the controller execute command */
23644- h->access.submit_command(h, c);
23645+ h->access->submit_command(h, c);
23646
23647 /* Put job onto the completed Q */
23648 addQ(&h->cmpQ, c);
23649@@ -3422,17 +3424,17 @@ startio:
23650
23651 static inline unsigned long get_next_completion(ctlr_info_t *h)
23652 {
23653- return h->access.command_completed(h);
23654+ return h->access->command_completed(h);
23655 }
23656
23657 static inline int interrupt_pending(ctlr_info_t *h)
23658 {
23659- return h->access.intr_pending(h);
23660+ return h->access->intr_pending(h);
23661 }
23662
23663 static inline long interrupt_not_for_us(ctlr_info_t *h)
23664 {
23665- return ((h->access.intr_pending(h) == 0) ||
23666+ return ((h->access->intr_pending(h) == 0) ||
23667 (h->interrupts_enabled == 0));
23668 }
23669
23670@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23671 u32 a;
23672
23673 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23674- return h->access.command_completed(h);
23675+ return h->access->command_completed(h);
23676
23677 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23678 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23679@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23680 trans_support & CFGTBL_Trans_use_short_tags);
23681
23682 /* Change the access methods to the performant access methods */
23683- h->access = SA5_performant_access;
23684+ h->access = &SA5_performant_access;
23685 h->transMethod = CFGTBL_Trans_Performant;
23686
23687 return;
23688@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23689 if (prod_index < 0)
23690 return -ENODEV;
23691 h->product_name = products[prod_index].product_name;
23692- h->access = *(products[prod_index].access);
23693+ h->access = products[prod_index].access;
23694
23695 if (cciss_board_disabled(h)) {
23696 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23697@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23698 }
23699
23700 /* make sure the board interrupts are off */
23701- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23702+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23703 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23704 if (rc)
23705 goto clean2;
23706@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23707 * fake ones to scoop up any residual completions.
23708 */
23709 spin_lock_irqsave(&h->lock, flags);
23710- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23711+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23712 spin_unlock_irqrestore(&h->lock, flags);
23713 free_irq(h->intr[PERF_MODE_INT], h);
23714 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23715@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23716 dev_info(&h->pdev->dev, "Board READY.\n");
23717 dev_info(&h->pdev->dev,
23718 "Waiting for stale completions to drain.\n");
23719- h->access.set_intr_mask(h, CCISS_INTR_ON);
23720+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23721 msleep(10000);
23722- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23723+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23724
23725 rc = controller_reset_failed(h->cfgtable);
23726 if (rc)
23727@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23728 cciss_scsi_setup(h);
23729
23730 /* Turn the interrupts on so we can service requests */
23731- h->access.set_intr_mask(h, CCISS_INTR_ON);
23732+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23733
23734 /* Get the firmware version */
23735 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23736@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23737 kfree(flush_buf);
23738 if (return_code != IO_OK)
23739 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23740- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23741+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23742 free_irq(h->intr[PERF_MODE_INT], h);
23743 }
23744
23745diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23746--- linux-3.0.4/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
23747+++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23748@@ -100,7 +100,7 @@ struct ctlr_info
23749 /* information about each logical volume */
23750 drive_info_struct *drv[CISS_MAX_LUN];
23751
23752- struct access_method access;
23753+ struct access_method *access;
23754
23755 /* queue and queue Info */
23756 struct list_head reqQ;
23757diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23758--- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23759+++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23760@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23761 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23762 goto Enomem4;
23763 }
23764- hba[i]->access.set_intr_mask(hba[i], 0);
23765+ hba[i]->access->set_intr_mask(hba[i], 0);
23766 if (request_irq(hba[i]->intr, do_ida_intr,
23767 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23768 {
23769@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23770 add_timer(&hba[i]->timer);
23771
23772 /* Enable IRQ now that spinlock and rate limit timer are set up */
23773- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23774+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23775
23776 for(j=0; j<NWD; j++) {
23777 struct gendisk *disk = ida_gendisk[i][j];
23778@@ -694,7 +694,7 @@ DBGINFO(
23779 for(i=0; i<NR_PRODUCTS; i++) {
23780 if (board_id == products[i].board_id) {
23781 c->product_name = products[i].product_name;
23782- c->access = *(products[i].access);
23783+ c->access = products[i].access;
23784 break;
23785 }
23786 }
23787@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23788 hba[ctlr]->intr = intr;
23789 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23790 hba[ctlr]->product_name = products[j].product_name;
23791- hba[ctlr]->access = *(products[j].access);
23792+ hba[ctlr]->access = products[j].access;
23793 hba[ctlr]->ctlr = ctlr;
23794 hba[ctlr]->board_id = board_id;
23795 hba[ctlr]->pci_dev = NULL; /* not PCI */
23796@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23797 struct scatterlist tmp_sg[SG_MAX];
23798 int i, dir, seg;
23799
23800+ pax_track_stack();
23801+
23802 queue_next:
23803 creq = blk_peek_request(q);
23804 if (!creq)
23805@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23806
23807 while((c = h->reqQ) != NULL) {
23808 /* Can't do anything if we're busy */
23809- if (h->access.fifo_full(h) == 0)
23810+ if (h->access->fifo_full(h) == 0)
23811 return;
23812
23813 /* Get the first entry from the request Q */
23814@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23815 h->Qdepth--;
23816
23817 /* Tell the controller to do our bidding */
23818- h->access.submit_command(h, c);
23819+ h->access->submit_command(h, c);
23820
23821 /* Get onto the completion Q */
23822 addQ(&h->cmpQ, c);
23823@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23824 unsigned long flags;
23825 __u32 a,a1;
23826
23827- istat = h->access.intr_pending(h);
23828+ istat = h->access->intr_pending(h);
23829 /* Is this interrupt for us? */
23830 if (istat == 0)
23831 return IRQ_NONE;
23832@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23833 */
23834 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23835 if (istat & FIFO_NOT_EMPTY) {
23836- while((a = h->access.command_completed(h))) {
23837+ while((a = h->access->command_completed(h))) {
23838 a1 = a; a &= ~3;
23839 if ((c = h->cmpQ) == NULL)
23840 {
23841@@ -1449,11 +1451,11 @@ static int sendcmd(
23842 /*
23843 * Disable interrupt
23844 */
23845- info_p->access.set_intr_mask(info_p, 0);
23846+ info_p->access->set_intr_mask(info_p, 0);
23847 /* Make sure there is room in the command FIFO */
23848 /* Actually it should be completely empty at this time. */
23849 for (i = 200000; i > 0; i--) {
23850- temp = info_p->access.fifo_full(info_p);
23851+ temp = info_p->access->fifo_full(info_p);
23852 if (temp != 0) {
23853 break;
23854 }
23855@@ -1466,7 +1468,7 @@ DBG(
23856 /*
23857 * Send the cmd
23858 */
23859- info_p->access.submit_command(info_p, c);
23860+ info_p->access->submit_command(info_p, c);
23861 complete = pollcomplete(ctlr);
23862
23863 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23864@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23865 * we check the new geometry. Then turn interrupts back on when
23866 * we're done.
23867 */
23868- host->access.set_intr_mask(host, 0);
23869+ host->access->set_intr_mask(host, 0);
23870 getgeometry(ctlr);
23871- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23872+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23873
23874 for(i=0; i<NWD; i++) {
23875 struct gendisk *disk = ida_gendisk[ctlr][i];
23876@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23877 /* Wait (up to 2 seconds) for a command to complete */
23878
23879 for (i = 200000; i > 0; i--) {
23880- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23881+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23882 if (done == 0) {
23883 udelay(10); /* a short fixed delay */
23884 } else
23885diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23886--- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23887+++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23888@@ -99,7 +99,7 @@ struct ctlr_info {
23889 drv_info_t drv[NWD];
23890 struct proc_dir_entry *proc;
23891
23892- struct access_method access;
23893+ struct access_method *access;
23894
23895 cmdlist_t *reqQ;
23896 cmdlist_t *cmpQ;
23897diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23898--- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23899+++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23900@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23901 unsigned long flags;
23902 int Channel, TargetID;
23903
23904+ pax_track_stack();
23905+
23906 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23907 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23908 sizeof(DAC960_SCSI_Inquiry_T) +
23909diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23910--- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23911+++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23912@@ -737,7 +737,7 @@ struct drbd_request;
23913 struct drbd_epoch {
23914 struct list_head list;
23915 unsigned int barrier_nr;
23916- atomic_t epoch_size; /* increased on every request added. */
23917+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23918 atomic_t active; /* increased on every req. added, and dec on every finished. */
23919 unsigned long flags;
23920 };
23921@@ -1109,7 +1109,7 @@ struct drbd_conf {
23922 void *int_dig_in;
23923 void *int_dig_vv;
23924 wait_queue_head_t seq_wait;
23925- atomic_t packet_seq;
23926+ atomic_unchecked_t packet_seq;
23927 unsigned int peer_seq;
23928 spinlock_t peer_seq_lock;
23929 unsigned int minor;
23930diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23931--- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23932+++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23933@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23934 p.sector = sector;
23935 p.block_id = block_id;
23936 p.blksize = blksize;
23937- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23938+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23939
23940 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23941 return false;
23942@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23943 p.sector = cpu_to_be64(req->sector);
23944 p.block_id = (unsigned long)req;
23945 p.seq_num = cpu_to_be32(req->seq_num =
23946- atomic_add_return(1, &mdev->packet_seq));
23947+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23948
23949 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23950
23951@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23952 atomic_set(&mdev->unacked_cnt, 0);
23953 atomic_set(&mdev->local_cnt, 0);
23954 atomic_set(&mdev->net_cnt, 0);
23955- atomic_set(&mdev->packet_seq, 0);
23956+ atomic_set_unchecked(&mdev->packet_seq, 0);
23957 atomic_set(&mdev->pp_in_use, 0);
23958 atomic_set(&mdev->pp_in_use_by_net, 0);
23959 atomic_set(&mdev->rs_sect_in, 0);
23960@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23961 mdev->receiver.t_state);
23962
23963 /* no need to lock it, I'm the only thread alive */
23964- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23965- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23966+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23967+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23968 mdev->al_writ_cnt =
23969 mdev->bm_writ_cnt =
23970 mdev->read_cnt =
23971diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23972--- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23973+++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23974@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23975 module_put(THIS_MODULE);
23976 }
23977
23978-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23979+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23980
23981 static unsigned short *
23982 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23983@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23984 cn_reply->id.idx = CN_IDX_DRBD;
23985 cn_reply->id.val = CN_VAL_DRBD;
23986
23987- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23988+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23989 cn_reply->ack = 0; /* not used here. */
23990 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23991 (int)((char *)tl - (char *)reply->tag_list);
23992@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23993 cn_reply->id.idx = CN_IDX_DRBD;
23994 cn_reply->id.val = CN_VAL_DRBD;
23995
23996- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23997+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23998 cn_reply->ack = 0; /* not used here. */
23999 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24000 (int)((char *)tl - (char *)reply->tag_list);
24001@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24002 cn_reply->id.idx = CN_IDX_DRBD;
24003 cn_reply->id.val = CN_VAL_DRBD;
24004
24005- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24006+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24007 cn_reply->ack = 0; // not used here.
24008 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24009 (int)((char*)tl - (char*)reply->tag_list);
24010@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24011 cn_reply->id.idx = CN_IDX_DRBD;
24012 cn_reply->id.val = CN_VAL_DRBD;
24013
24014- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24015+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24016 cn_reply->ack = 0; /* not used here. */
24017 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24018 (int)((char *)tl - (char *)reply->tag_list);
24019diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
24020--- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24021+++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24022@@ -894,7 +894,7 @@ retry:
24023 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24024 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24025
24026- atomic_set(&mdev->packet_seq, 0);
24027+ atomic_set_unchecked(&mdev->packet_seq, 0);
24028 mdev->peer_seq = 0;
24029
24030 drbd_thread_start(&mdev->asender);
24031@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24032 do {
24033 next_epoch = NULL;
24034
24035- epoch_size = atomic_read(&epoch->epoch_size);
24036+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24037
24038 switch (ev & ~EV_CLEANUP) {
24039 case EV_PUT:
24040@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24041 rv = FE_DESTROYED;
24042 } else {
24043 epoch->flags = 0;
24044- atomic_set(&epoch->epoch_size, 0);
24045+ atomic_set_unchecked(&epoch->epoch_size, 0);
24046 /* atomic_set(&epoch->active, 0); is already zero */
24047 if (rv == FE_STILL_LIVE)
24048 rv = FE_RECYCLED;
24049@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24050 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24051 drbd_flush(mdev);
24052
24053- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24054+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24055 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24056 if (epoch)
24057 break;
24058 }
24059
24060 epoch = mdev->current_epoch;
24061- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24062+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24063
24064 D_ASSERT(atomic_read(&epoch->active) == 0);
24065 D_ASSERT(epoch->flags == 0);
24066@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24067 }
24068
24069 epoch->flags = 0;
24070- atomic_set(&epoch->epoch_size, 0);
24071+ atomic_set_unchecked(&epoch->epoch_size, 0);
24072 atomic_set(&epoch->active, 0);
24073
24074 spin_lock(&mdev->epoch_lock);
24075- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24076+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24077 list_add(&epoch->list, &mdev->current_epoch->list);
24078 mdev->current_epoch = epoch;
24079 mdev->epochs++;
24080@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24081 spin_unlock(&mdev->peer_seq_lock);
24082
24083 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24084- atomic_inc(&mdev->current_epoch->epoch_size);
24085+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24086 return drbd_drain_block(mdev, data_size);
24087 }
24088
24089@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24090
24091 spin_lock(&mdev->epoch_lock);
24092 e->epoch = mdev->current_epoch;
24093- atomic_inc(&e->epoch->epoch_size);
24094+ atomic_inc_unchecked(&e->epoch->epoch_size);
24095 atomic_inc(&e->epoch->active);
24096 spin_unlock(&mdev->epoch_lock);
24097
24098@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24099 D_ASSERT(list_empty(&mdev->done_ee));
24100
24101 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24102- atomic_set(&mdev->current_epoch->epoch_size, 0);
24103+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24104 D_ASSERT(list_empty(&mdev->current_epoch->list));
24105 }
24106
24107diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
24108--- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24109+++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24110@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24111 struct kvec iov;
24112 sigset_t blocked, oldset;
24113
24114+ pax_track_stack();
24115+
24116 if (unlikely(!sock)) {
24117 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24118 lo->disk->disk_name, (send ? "send" : "recv"));
24119@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24120 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24121 unsigned int cmd, unsigned long arg)
24122 {
24123+ pax_track_stack();
24124+
24125 switch (cmd) {
24126 case NBD_DISCONNECT: {
24127 struct request sreq;
24128diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
24129--- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24130+++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24131@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24132 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24133 return -EFAULT;
24134
24135- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24136+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24137 return -EFAULT;
24138
24139 client = agp_find_client_by_pid(reserve.pid);
24140diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
24141--- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24142+++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24143@@ -9,6 +9,7 @@
24144 #include <linux/types.h>
24145 #include <linux/errno.h>
24146 #include <linux/tty.h>
24147+#include <linux/mutex.h>
24148 #include <linux/timer.h>
24149 #include <linux/kernel.h>
24150 #include <linux/wait.h>
24151@@ -34,6 +35,7 @@ static int vfd_is_open;
24152 static unsigned char vfd[40];
24153 static int vfd_cursor;
24154 static unsigned char ledpb, led;
24155+static DEFINE_MUTEX(vfd_mutex);
24156
24157 static void update_vfd(void)
24158 {
24159@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24160 if (!vfd_is_open)
24161 return -EBUSY;
24162
24163+ mutex_lock(&vfd_mutex);
24164 for (;;) {
24165 char c;
24166 if (!indx)
24167 break;
24168- if (get_user(c, buf))
24169+ if (get_user(c, buf)) {
24170+ mutex_unlock(&vfd_mutex);
24171 return -EFAULT;
24172+ }
24173 if (esc) {
24174 set_led(c);
24175 esc = 0;
24176@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24177 buf++;
24178 }
24179 update_vfd();
24180+ mutex_unlock(&vfd_mutex);
24181
24182 return len;
24183 }
24184diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
24185--- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24186+++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24187@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24188 switch (cmd) {
24189
24190 case RTC_PLL_GET:
24191+ memset(&pll, 0, sizeof(pll));
24192 if (get_rtc_pll(&pll))
24193 return -EINVAL;
24194 else
24195diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
24196--- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24197+++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24198@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24199 }
24200
24201 static int
24202-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24203+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24204 struct hpet_info *info)
24205 {
24206 struct hpet_timer __iomem *timer;
24207diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
24208--- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24209+++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24210@@ -415,7 +415,7 @@ struct ipmi_smi {
24211 struct proc_dir_entry *proc_dir;
24212 char proc_dir_name[10];
24213
24214- atomic_t stats[IPMI_NUM_STATS];
24215+ atomic_unchecked_t stats[IPMI_NUM_STATS];
24216
24217 /*
24218 * run_to_completion duplicate of smb_info, smi_info
24219@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24220
24221
24222 #define ipmi_inc_stat(intf, stat) \
24223- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24224+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24225 #define ipmi_get_stat(intf, stat) \
24226- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24227+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24228
24229 static int is_lan_addr(struct ipmi_addr *addr)
24230 {
24231@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24232 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24233 init_waitqueue_head(&intf->waitq);
24234 for (i = 0; i < IPMI_NUM_STATS; i++)
24235- atomic_set(&intf->stats[i], 0);
24236+ atomic_set_unchecked(&intf->stats[i], 0);
24237
24238 intf->proc_dir = NULL;
24239
24240@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24241 struct ipmi_smi_msg smi_msg;
24242 struct ipmi_recv_msg recv_msg;
24243
24244+ pax_track_stack();
24245+
24246 si = (struct ipmi_system_interface_addr *) &addr;
24247 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24248 si->channel = IPMI_BMC_CHANNEL;
24249diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
24250--- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24251+++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24252@@ -277,7 +277,7 @@ struct smi_info {
24253 unsigned char slave_addr;
24254
24255 /* Counters and things for the proc filesystem. */
24256- atomic_t stats[SI_NUM_STATS];
24257+ atomic_unchecked_t stats[SI_NUM_STATS];
24258
24259 struct task_struct *thread;
24260
24261@@ -286,9 +286,9 @@ struct smi_info {
24262 };
24263
24264 #define smi_inc_stat(smi, stat) \
24265- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24266+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24267 #define smi_get_stat(smi, stat) \
24268- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24269+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24270
24271 #define SI_MAX_PARMS 4
24272
24273@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24274 atomic_set(&new_smi->req_events, 0);
24275 new_smi->run_to_completion = 0;
24276 for (i = 0; i < SI_NUM_STATS; i++)
24277- atomic_set(&new_smi->stats[i], 0);
24278+ atomic_set_unchecked(&new_smi->stats[i], 0);
24279
24280 new_smi->interrupt_disabled = 1;
24281 atomic_set(&new_smi->stop_operation, 0);
24282diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
24283--- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24284+++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24285@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24286
24287 config DEVKMEM
24288 bool "/dev/kmem virtual device support"
24289- default y
24290+ default n
24291+ depends on !GRKERNSEC_KMEM
24292 help
24293 Say Y here if you want to support the /dev/kmem device. The
24294 /dev/kmem device is rarely used, but can be used for certain
24295@@ -596,6 +597,7 @@ config DEVPORT
24296 bool
24297 depends on !M68K
24298 depends on ISA || PCI
24299+ depends on !GRKERNSEC_KMEM
24300 default y
24301
24302 source "drivers/s390/char/Kconfig"
24303diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
24304--- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24305+++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24306@@ -18,6 +18,7 @@
24307 #include <linux/raw.h>
24308 #include <linux/tty.h>
24309 #include <linux/capability.h>
24310+#include <linux/security.h>
24311 #include <linux/ptrace.h>
24312 #include <linux/device.h>
24313 #include <linux/highmem.h>
24314@@ -34,6 +35,10 @@
24315 # include <linux/efi.h>
24316 #endif
24317
24318+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24319+extern struct file_operations grsec_fops;
24320+#endif
24321+
24322 static inline unsigned long size_inside_page(unsigned long start,
24323 unsigned long size)
24324 {
24325@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24326
24327 while (cursor < to) {
24328 if (!devmem_is_allowed(pfn)) {
24329+#ifdef CONFIG_GRKERNSEC_KMEM
24330+ gr_handle_mem_readwrite(from, to);
24331+#else
24332 printk(KERN_INFO
24333 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24334 current->comm, from, to);
24335+#endif
24336 return 0;
24337 }
24338 cursor += PAGE_SIZE;
24339@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24340 }
24341 return 1;
24342 }
24343+#elif defined(CONFIG_GRKERNSEC_KMEM)
24344+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24345+{
24346+ return 0;
24347+}
24348 #else
24349 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24350 {
24351@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24352
24353 while (count > 0) {
24354 unsigned long remaining;
24355+ char *temp;
24356
24357 sz = size_inside_page(p, count);
24358
24359@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24360 if (!ptr)
24361 return -EFAULT;
24362
24363- remaining = copy_to_user(buf, ptr, sz);
24364+#ifdef CONFIG_PAX_USERCOPY
24365+ temp = kmalloc(sz, GFP_KERNEL);
24366+ if (!temp) {
24367+ unxlate_dev_mem_ptr(p, ptr);
24368+ return -ENOMEM;
24369+ }
24370+ memcpy(temp, ptr, sz);
24371+#else
24372+ temp = ptr;
24373+#endif
24374+
24375+ remaining = copy_to_user(buf, temp, sz);
24376+
24377+#ifdef CONFIG_PAX_USERCOPY
24378+ kfree(temp);
24379+#endif
24380+
24381 unxlate_dev_mem_ptr(p, ptr);
24382 if (remaining)
24383 return -EFAULT;
24384@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
24385 size_t count, loff_t *ppos)
24386 {
24387 unsigned long p = *ppos;
24388- ssize_t low_count, read, sz;
24389+ ssize_t low_count, read, sz, err = 0;
24390 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
24391- int err = 0;
24392
24393 read = 0;
24394 if (p < (unsigned long) high_memory) {
24395@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
24396 }
24397 #endif
24398 while (low_count > 0) {
24399+ char *temp;
24400+
24401 sz = size_inside_page(p, low_count);
24402
24403 /*
24404@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24405 */
24406 kbuf = xlate_dev_kmem_ptr((char *)p);
24407
24408- if (copy_to_user(buf, kbuf, sz))
24409+#ifdef CONFIG_PAX_USERCOPY
24410+ temp = kmalloc(sz, GFP_KERNEL);
24411+ if (!temp)
24412+ return -ENOMEM;
24413+ memcpy(temp, kbuf, sz);
24414+#else
24415+ temp = kbuf;
24416+#endif
24417+
24418+ err = copy_to_user(buf, temp, sz);
24419+
24420+#ifdef CONFIG_PAX_USERCOPY
24421+ kfree(temp);
24422+#endif
24423+
24424+ if (err)
24425 return -EFAULT;
24426 buf += sz;
24427 p += sz;
24428@@ -866,6 +913,9 @@ static const struct memdev {
24429 #ifdef CONFIG_CRASH_DUMP
24430 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24431 #endif
24432+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24433+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24434+#endif
24435 };
24436
24437 static int memory_open(struct inode *inode, struct file *filp)
24438diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
24439--- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
24440+++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
24441@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24442
24443 spin_unlock_irq(&rtc_lock);
24444
24445- if (copy_to_user(buf, contents, tmp - contents))
24446+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24447 return -EFAULT;
24448
24449 *ppos = i;
24450diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
24451--- linux-3.0.4/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
24452+++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
24453@@ -261,8 +261,13 @@
24454 /*
24455 * Configuration information
24456 */
24457+#ifdef CONFIG_GRKERNSEC_RANDNET
24458+#define INPUT_POOL_WORDS 512
24459+#define OUTPUT_POOL_WORDS 128
24460+#else
24461 #define INPUT_POOL_WORDS 128
24462 #define OUTPUT_POOL_WORDS 32
24463+#endif
24464 #define SEC_XFER_SIZE 512
24465 #define EXTRACT_SIZE 10
24466
24467@@ -300,10 +305,17 @@ static struct poolinfo {
24468 int poolwords;
24469 int tap1, tap2, tap3, tap4, tap5;
24470 } poolinfo_table[] = {
24471+#ifdef CONFIG_GRKERNSEC_RANDNET
24472+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24473+ { 512, 411, 308, 208, 104, 1 },
24474+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24475+ { 128, 103, 76, 51, 25, 1 },
24476+#else
24477 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24478 { 128, 103, 76, 51, 25, 1 },
24479 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24480 { 32, 26, 20, 14, 7, 1 },
24481+#endif
24482 #if 0
24483 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24484 { 2048, 1638, 1231, 819, 411, 1 },
24485@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24486
24487 extract_buf(r, tmp);
24488 i = min_t(int, nbytes, EXTRACT_SIZE);
24489- if (copy_to_user(buf, tmp, i)) {
24490+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24491 ret = -EFAULT;
24492 break;
24493 }
24494@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24495 #include <linux/sysctl.h>
24496
24497 static int min_read_thresh = 8, min_write_thresh;
24498-static int max_read_thresh = INPUT_POOL_WORDS * 32;
24499+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24500 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24501 static char sysctl_bootid[16];
24502
24503diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
24504--- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
24505+++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
24506@@ -55,6 +55,7 @@
24507 #include <asm/uaccess.h>
24508 #include <asm/io.h>
24509 #include <asm/system.h>
24510+#include <asm/local.h>
24511
24512 #include <linux/sonypi.h>
24513
24514@@ -491,7 +492,7 @@ static struct sonypi_device {
24515 spinlock_t fifo_lock;
24516 wait_queue_head_t fifo_proc_list;
24517 struct fasync_struct *fifo_async;
24518- int open_count;
24519+ local_t open_count;
24520 int model;
24521 struct input_dev *input_jog_dev;
24522 struct input_dev *input_key_dev;
24523@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24524 static int sonypi_misc_release(struct inode *inode, struct file *file)
24525 {
24526 mutex_lock(&sonypi_device.lock);
24527- sonypi_device.open_count--;
24528+ local_dec(&sonypi_device.open_count);
24529 mutex_unlock(&sonypi_device.lock);
24530 return 0;
24531 }
24532@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24533 {
24534 mutex_lock(&sonypi_device.lock);
24535 /* Flush input queue on first open */
24536- if (!sonypi_device.open_count)
24537+ if (!local_read(&sonypi_device.open_count))
24538 kfifo_reset(&sonypi_device.fifo);
24539- sonypi_device.open_count++;
24540+ local_inc(&sonypi_device.open_count);
24541 mutex_unlock(&sonypi_device.lock);
24542
24543 return 0;
24544diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
24545--- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
24546+++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
24547@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24548 event = addr;
24549
24550 if ((event->event_type == 0 && event->event_size == 0) ||
24551- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24552+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24553 return NULL;
24554
24555 return addr;
24556@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24557 return NULL;
24558
24559 if ((event->event_type == 0 && event->event_size == 0) ||
24560- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24561+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24562 return NULL;
24563
24564 (*pos)++;
24565@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24566 int i;
24567
24568 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24569- seq_putc(m, data[i]);
24570+ if (!seq_putc(m, data[i]))
24571+ return -EFAULT;
24572
24573 return 0;
24574 }
24575@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24576 log->bios_event_log_end = log->bios_event_log + len;
24577
24578 virt = acpi_os_map_memory(start, len);
24579+ if (!virt) {
24580+ kfree(log->bios_event_log);
24581+ log->bios_event_log = NULL;
24582+ return -EFAULT;
24583+ }
24584
24585 memcpy(log->bios_event_log, virt, len);
24586
24587diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24588--- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24589+++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24590@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24591 chip->vendor.req_complete_val)
24592 goto out_recv;
24593
24594- if ((status == chip->vendor.req_canceled)) {
24595+ if (status == chip->vendor.req_canceled) {
24596 dev_err(chip->dev, "Operation Canceled\n");
24597 rc = -ECANCELED;
24598 goto out;
24599@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24600
24601 struct tpm_chip *chip = dev_get_drvdata(dev);
24602
24603+ pax_track_stack();
24604+
24605 tpm_cmd.header.in = tpm_readpubek_header;
24606 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24607 "attempting to read the PUBEK");
24608diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24609--- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24610+++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24611@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24612 0xCA, 0x34, 0x2B, 0x2E};
24613 struct scatterlist sg;
24614
24615+ pax_track_stack();
24616+
24617 memset(src, 0, sizeof(src));
24618 memset(ctx.key, 0, sizeof(ctx.key));
24619
24620diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24621--- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24622+++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24623@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24624 struct crypto_aes_ctx gen_aes;
24625 int cpu;
24626
24627+ pax_track_stack();
24628+
24629 if (key_len % 8) {
24630 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24631 return -EINVAL;
24632diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24633--- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24634+++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24635@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24636 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24637 static int edac_pci_poll_msec = 1000; /* one second workq period */
24638
24639-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24640-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24641+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24642+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24643
24644 static struct kobject *edac_pci_top_main_kobj;
24645 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24646@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24647 edac_printk(KERN_CRIT, EDAC_PCI,
24648 "Signaled System Error on %s\n",
24649 pci_name(dev));
24650- atomic_inc(&pci_nonparity_count);
24651+ atomic_inc_unchecked(&pci_nonparity_count);
24652 }
24653
24654 if (status & (PCI_STATUS_PARITY)) {
24655@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24656 "Master Data Parity Error on %s\n",
24657 pci_name(dev));
24658
24659- atomic_inc(&pci_parity_count);
24660+ atomic_inc_unchecked(&pci_parity_count);
24661 }
24662
24663 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24664@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24665 "Detected Parity Error on %s\n",
24666 pci_name(dev));
24667
24668- atomic_inc(&pci_parity_count);
24669+ atomic_inc_unchecked(&pci_parity_count);
24670 }
24671 }
24672
24673@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24674 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24675 "Signaled System Error on %s\n",
24676 pci_name(dev));
24677- atomic_inc(&pci_nonparity_count);
24678+ atomic_inc_unchecked(&pci_nonparity_count);
24679 }
24680
24681 if (status & (PCI_STATUS_PARITY)) {
24682@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24683 "Master Data Parity Error on "
24684 "%s\n", pci_name(dev));
24685
24686- atomic_inc(&pci_parity_count);
24687+ atomic_inc_unchecked(&pci_parity_count);
24688 }
24689
24690 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24691@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24692 "Detected Parity Error on %s\n",
24693 pci_name(dev));
24694
24695- atomic_inc(&pci_parity_count);
24696+ atomic_inc_unchecked(&pci_parity_count);
24697 }
24698 }
24699 }
24700@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24701 if (!check_pci_errors)
24702 return;
24703
24704- before_count = atomic_read(&pci_parity_count);
24705+ before_count = atomic_read_unchecked(&pci_parity_count);
24706
24707 /* scan all PCI devices looking for a Parity Error on devices and
24708 * bridges.
24709@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24710 /* Only if operator has selected panic on PCI Error */
24711 if (edac_pci_get_panic_on_pe()) {
24712 /* If the count is different 'after' from 'before' */
24713- if (before_count != atomic_read(&pci_parity_count))
24714+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24715 panic("EDAC: PCI Parity Error");
24716 }
24717 }
24718diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24719--- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24720+++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24721@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24722 bool (*dc_mce)(u16, u8);
24723 bool (*ic_mce)(u16, u8);
24724 bool (*nb_mce)(u16, u8);
24725-};
24726+} __no_const;
24727
24728 void amd_report_gart_errors(bool);
24729 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24730diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24731--- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24732+++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24733@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24734
24735 void fw_core_remove_card(struct fw_card *card)
24736 {
24737- struct fw_card_driver dummy_driver = dummy_driver_template;
24738+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24739
24740 card->driver->update_phy_reg(card, 4,
24741 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24742diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24743--- linux-3.0.4/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
24744+++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24745@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24746 int ret;
24747
24748 if ((request->channels == 0 && request->bandwidth == 0) ||
24749- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24750- request->bandwidth < 0)
24751+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24752 return -EINVAL;
24753
24754 r = kmalloc(sizeof(*r), GFP_KERNEL);
24755diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24756--- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24757+++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24758@@ -101,6 +101,7 @@ struct fw_card_driver {
24759
24760 int (*stop_iso)(struct fw_iso_context *ctx);
24761 };
24762+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24763
24764 void fw_card_initialize(struct fw_card *card,
24765 const struct fw_card_driver *driver, struct device *device);
24766diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24767--- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24768+++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24769@@ -37,6 +37,7 @@
24770 #include <linux/timer.h>
24771 #include <linux/types.h>
24772 #include <linux/workqueue.h>
24773+#include <linux/sched.h>
24774
24775 #include <asm/byteorder.h>
24776
24777@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24778 struct transaction_callback_data d;
24779 struct fw_transaction t;
24780
24781+ pax_track_stack();
24782+
24783 init_timer_on_stack(&t.split_timeout_timer);
24784 init_completion(&d.done);
24785 d.payload = payload;
24786diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24787--- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24788+++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24789@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24790 }
24791 }
24792 else {
24793- /*
24794- * no iounmap() for that ioremap(); it would be a no-op, but
24795- * it's so early in setup that sucker gets confused into doing
24796- * what it shouldn't if we actually call it.
24797- */
24798 p = dmi_ioremap(0xF0000, 0x10000);
24799 if (p == NULL)
24800 goto error;
24801diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24802--- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24803+++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24804@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24805 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24806 maskl, pendl, maskh, pendh);
24807
24808- atomic_inc(&irq_err_count);
24809+ atomic_inc_unchecked(&irq_err_count);
24810
24811 return -EINVAL;
24812 }
24813diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24814--- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24815+++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24816@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24817 struct drm_crtc *tmp;
24818 int crtc_mask = 1;
24819
24820- WARN(!crtc, "checking null crtc?\n");
24821+ BUG_ON(!crtc);
24822
24823 dev = crtc->dev;
24824
24825@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24826 struct drm_encoder *encoder;
24827 bool ret = true;
24828
24829+ pax_track_stack();
24830+
24831 crtc->enabled = drm_helper_crtc_in_use(crtc);
24832 if (!crtc->enabled)
24833 return true;
24834diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24835--- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24836+++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24837@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24838
24839 dev = file_priv->minor->dev;
24840 atomic_inc(&dev->ioctl_count);
24841- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24842+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24843 ++file_priv->ioctl_count;
24844
24845 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24846diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24847--- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24848+++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24849@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24850 }
24851
24852 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24853- atomic_set(&dev->counts[i], 0);
24854+ atomic_set_unchecked(&dev->counts[i], 0);
24855
24856 dev->sigdata.lock = NULL;
24857
24858@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24859
24860 retcode = drm_open_helper(inode, filp, dev);
24861 if (!retcode) {
24862- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24863- if (!dev->open_count++)
24864+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24865+ if (local_inc_return(&dev->open_count) == 1)
24866 retcode = drm_setup(dev);
24867 }
24868 if (!retcode) {
24869@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24870
24871 mutex_lock(&drm_global_mutex);
24872
24873- DRM_DEBUG("open_count = %d\n", dev->open_count);
24874+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24875
24876 if (dev->driver->preclose)
24877 dev->driver->preclose(dev, file_priv);
24878@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24879 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24880 task_pid_nr(current),
24881 (long)old_encode_dev(file_priv->minor->device),
24882- dev->open_count);
24883+ local_read(&dev->open_count));
24884
24885 /* if the master has gone away we can't do anything with the lock */
24886 if (file_priv->minor->master)
24887@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24888 * End inline drm_release
24889 */
24890
24891- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24892- if (!--dev->open_count) {
24893+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24894+ if (local_dec_and_test(&dev->open_count)) {
24895 if (atomic_read(&dev->ioctl_count)) {
24896 DRM_ERROR("Device busy: %d\n",
24897 atomic_read(&dev->ioctl_count));
24898diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24899--- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24900+++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24901@@ -36,7 +36,7 @@
24902 struct drm_global_item {
24903 struct mutex mutex;
24904 void *object;
24905- int refcount;
24906+ atomic_t refcount;
24907 };
24908
24909 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24910@@ -49,7 +49,7 @@ void drm_global_init(void)
24911 struct drm_global_item *item = &glob[i];
24912 mutex_init(&item->mutex);
24913 item->object = NULL;
24914- item->refcount = 0;
24915+ atomic_set(&item->refcount, 0);
24916 }
24917 }
24918
24919@@ -59,7 +59,7 @@ void drm_global_release(void)
24920 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24921 struct drm_global_item *item = &glob[i];
24922 BUG_ON(item->object != NULL);
24923- BUG_ON(item->refcount != 0);
24924+ BUG_ON(atomic_read(&item->refcount) != 0);
24925 }
24926 }
24927
24928@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24929 void *object;
24930
24931 mutex_lock(&item->mutex);
24932- if (item->refcount == 0) {
24933+ if (atomic_read(&item->refcount) == 0) {
24934 item->object = kzalloc(ref->size, GFP_KERNEL);
24935 if (unlikely(item->object == NULL)) {
24936 ret = -ENOMEM;
24937@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24938 goto out_err;
24939
24940 }
24941- ++item->refcount;
24942+ atomic_inc(&item->refcount);
24943 ref->object = item->object;
24944 object = item->object;
24945 mutex_unlock(&item->mutex);
24946@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24947 struct drm_global_item *item = &glob[ref->global_type];
24948
24949 mutex_lock(&item->mutex);
24950- BUG_ON(item->refcount == 0);
24951+ BUG_ON(atomic_read(&item->refcount) == 0);
24952 BUG_ON(ref->object != item->object);
24953- if (--item->refcount == 0) {
24954+ if (atomic_dec_and_test(&item->refcount)) {
24955 ref->release(ref);
24956 item->object = NULL;
24957 }
24958diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24959--- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24960+++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24961@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24962 struct drm_local_map *map;
24963 struct drm_map_list *r_list;
24964
24965- /* Hardcoded from _DRM_FRAME_BUFFER,
24966- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24967- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24968- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24969+ static const char * const types[] = {
24970+ [_DRM_FRAME_BUFFER] = "FB",
24971+ [_DRM_REGISTERS] = "REG",
24972+ [_DRM_SHM] = "SHM",
24973+ [_DRM_AGP] = "AGP",
24974+ [_DRM_SCATTER_GATHER] = "SG",
24975+ [_DRM_CONSISTENT] = "PCI",
24976+ [_DRM_GEM] = "GEM" };
24977 const char *type;
24978 int i;
24979
24980@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24981 map = r_list->map;
24982 if (!map)
24983 continue;
24984- if (map->type < 0 || map->type > 5)
24985+ if (map->type >= ARRAY_SIZE(types))
24986 type = "??";
24987 else
24988 type = types[map->type];
24989@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24990 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24991 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24992 vma->vm_flags & VM_IO ? 'i' : '-',
24993+#ifdef CONFIG_GRKERNSEC_HIDESYM
24994+ 0);
24995+#else
24996 vma->vm_pgoff);
24997+#endif
24998
24999 #if defined(__i386__)
25000 pgprot = pgprot_val(vma->vm_page_prot);
25001diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
25002--- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
25003+++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
25004@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
25005 stats->data[i].value =
25006 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
25007 else
25008- stats->data[i].value = atomic_read(&dev->counts[i]);
25009+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
25010 stats->data[i].type = dev->types[i];
25011 }
25012
25013diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
25014--- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
25015+++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
25016@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
25017 if (drm_lock_take(&master->lock, lock->context)) {
25018 master->lock.file_priv = file_priv;
25019 master->lock.lock_time = jiffies;
25020- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
25021+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
25022 break; /* Got lock */
25023 }
25024
25025@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
25026 return -EINVAL;
25027 }
25028
25029- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
25030+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
25031
25032 if (drm_lock_free(&master->lock, lock->context)) {
25033 /* FIXME: Should really bail out here. */
25034diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
25035--- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
25036+++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
25037@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
25038 dma->buflist[vertex->idx],
25039 vertex->discard, vertex->used);
25040
25041- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25042- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25043+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25044+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25045 sarea_priv->last_enqueue = dev_priv->counter - 1;
25046 sarea_priv->last_dispatch = (int)hw_status[5];
25047
25048@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
25049 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
25050 mc->last_render);
25051
25052- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25053- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25054+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25055+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25056 sarea_priv->last_enqueue = dev_priv->counter - 1;
25057 sarea_priv->last_dispatch = (int)hw_status[5];
25058
25059diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
25060--- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
25061+++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
25062@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
25063 int page_flipping;
25064
25065 wait_queue_head_t irq_queue;
25066- atomic_t irq_received;
25067- atomic_t irq_emitted;
25068+ atomic_unchecked_t irq_received;
25069+ atomic_unchecked_t irq_emitted;
25070
25071 int front_offset;
25072 } drm_i810_private_t;
25073diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
25074--- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
25075+++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
25076@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
25077 I915_READ(GTIMR));
25078 }
25079 seq_printf(m, "Interrupts received: %d\n",
25080- atomic_read(&dev_priv->irq_received));
25081+ atomic_read_unchecked(&dev_priv->irq_received));
25082 for (i = 0; i < I915_NUM_RINGS; i++) {
25083 if (IS_GEN6(dev)) {
25084 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
25085diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
25086--- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
25087+++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
25088@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
25089 bool can_switch;
25090
25091 spin_lock(&dev->count_lock);
25092- can_switch = (dev->open_count == 0);
25093+ can_switch = (local_read(&dev->open_count) == 0);
25094 spin_unlock(&dev->count_lock);
25095 return can_switch;
25096 }
25097diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
25098--- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
25099+++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
25100@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
25101 /* render clock increase/decrease */
25102 /* display clock increase/decrease */
25103 /* pll clock increase/decrease */
25104-};
25105+} __no_const;
25106
25107 struct intel_device_info {
25108 u8 gen;
25109@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
25110 int current_page;
25111 int page_flipping;
25112
25113- atomic_t irq_received;
25114+ atomic_unchecked_t irq_received;
25115
25116 /* protects the irq masks */
25117 spinlock_t irq_lock;
25118@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
25119 * will be page flipped away on the next vblank. When it
25120 * reaches 0, dev_priv->pending_flip_queue will be woken up.
25121 */
25122- atomic_t pending_flip;
25123+ atomic_unchecked_t pending_flip;
25124 };
25125
25126 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
25127@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
25128 extern void intel_teardown_gmbus(struct drm_device *dev);
25129 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
25130 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
25131-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25132+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25133 {
25134 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
25135 }
25136diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
25137--- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
25138+++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
25139@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
25140 i915_gem_clflush_object(obj);
25141
25142 if (obj->base.pending_write_domain)
25143- cd->flips |= atomic_read(&obj->pending_flip);
25144+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
25145
25146 /* The actual obj->write_domain will be updated with
25147 * pending_write_domain after we emit the accumulated flush for all
25148diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
25149--- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
25150+++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
25151@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
25152 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
25153 struct drm_i915_master_private *master_priv;
25154
25155- atomic_inc(&dev_priv->irq_received);
25156+ atomic_inc_unchecked(&dev_priv->irq_received);
25157
25158 /* disable master interrupt before clearing iir */
25159 de_ier = I915_READ(DEIER);
25160@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
25161 struct drm_i915_master_private *master_priv;
25162 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
25163
25164- atomic_inc(&dev_priv->irq_received);
25165+ atomic_inc_unchecked(&dev_priv->irq_received);
25166
25167 if (IS_GEN6(dev))
25168 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
25169@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
25170 int ret = IRQ_NONE, pipe;
25171 bool blc_event = false;
25172
25173- atomic_inc(&dev_priv->irq_received);
25174+ atomic_inc_unchecked(&dev_priv->irq_received);
25175
25176 iir = I915_READ(IIR);
25177
25178@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
25179 {
25180 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25181
25182- atomic_set(&dev_priv->irq_received, 0);
25183+ atomic_set_unchecked(&dev_priv->irq_received, 0);
25184
25185 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25186 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25187@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
25188 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25189 int pipe;
25190
25191- atomic_set(&dev_priv->irq_received, 0);
25192+ atomic_set_unchecked(&dev_priv->irq_received, 0);
25193
25194 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25195 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25196diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
25197--- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
25198+++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
25199@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
25200
25201 wait_event(dev_priv->pending_flip_queue,
25202 atomic_read(&dev_priv->mm.wedged) ||
25203- atomic_read(&obj->pending_flip) == 0);
25204+ atomic_read_unchecked(&obj->pending_flip) == 0);
25205
25206 /* Big Hammer, we also need to ensure that any pending
25207 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
25208@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
25209 obj = to_intel_framebuffer(crtc->fb)->obj;
25210 dev_priv = crtc->dev->dev_private;
25211 wait_event(dev_priv->pending_flip_queue,
25212- atomic_read(&obj->pending_flip) == 0);
25213+ atomic_read_unchecked(&obj->pending_flip) == 0);
25214 }
25215
25216 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
25217@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
25218
25219 atomic_clear_mask(1 << intel_crtc->plane,
25220 &obj->pending_flip.counter);
25221- if (atomic_read(&obj->pending_flip) == 0)
25222+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
25223 wake_up(&dev_priv->pending_flip_queue);
25224
25225 schedule_work(&work->work);
25226@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
25227 /* Block clients from rendering to the new back buffer until
25228 * the flip occurs and the object is no longer visible.
25229 */
25230- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25231+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25232
25233 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
25234 if (ret)
25235@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
25236 return 0;
25237
25238 cleanup_pending:
25239- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25240+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25241 cleanup_objs:
25242 drm_gem_object_unreference(&work->old_fb_obj->base);
25243 drm_gem_object_unreference(&obj->base);
25244diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
25245--- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
25246+++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
25247@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
25248 u32 clear_cmd;
25249 u32 maccess;
25250
25251- atomic_t vbl_received; /**< Number of vblanks received. */
25252+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
25253 wait_queue_head_t fence_queue;
25254- atomic_t last_fence_retired;
25255+ atomic_unchecked_t last_fence_retired;
25256 u32 next_fence_to_post;
25257
25258 unsigned int fb_cpp;
25259diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
25260--- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
25261+++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
25262@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
25263 if (crtc != 0)
25264 return 0;
25265
25266- return atomic_read(&dev_priv->vbl_received);
25267+ return atomic_read_unchecked(&dev_priv->vbl_received);
25268 }
25269
25270
25271@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
25272 /* VBLANK interrupt */
25273 if (status & MGA_VLINEPEN) {
25274 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
25275- atomic_inc(&dev_priv->vbl_received);
25276+ atomic_inc_unchecked(&dev_priv->vbl_received);
25277 drm_handle_vblank(dev, 0);
25278 handled = 1;
25279 }
25280@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
25281 if ((prim_start & ~0x03) != (prim_end & ~0x03))
25282 MGA_WRITE(MGA_PRIMEND, prim_end);
25283
25284- atomic_inc(&dev_priv->last_fence_retired);
25285+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
25286 DRM_WAKEUP(&dev_priv->fence_queue);
25287 handled = 1;
25288 }
25289@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
25290 * using fences.
25291 */
25292 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
25293- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
25294+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
25295 - *sequence) <= (1 << 23)));
25296
25297 *sequence = cur_fence;
25298diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
25299--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
25300+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
25301@@ -200,7 +200,7 @@ struct methods {
25302 const char desc[8];
25303 void (*loadbios)(struct drm_device *, uint8_t *);
25304 const bool rw;
25305-};
25306+} __do_const;
25307
25308 static struct methods shadow_methods[] = {
25309 { "PRAMIN", load_vbios_pramin, true },
25310@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
25311 struct bit_table {
25312 const char id;
25313 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
25314-};
25315+} __no_const;
25316
25317 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
25318
25319diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
25320--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
25321+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
25322@@ -227,7 +227,7 @@ struct nouveau_channel {
25323 struct list_head pending;
25324 uint32_t sequence;
25325 uint32_t sequence_ack;
25326- atomic_t last_sequence_irq;
25327+ atomic_unchecked_t last_sequence_irq;
25328 } fence;
25329
25330 /* DMA push buffer */
25331@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
25332 u32 handle, u16 class);
25333 void (*set_tile_region)(struct drm_device *dev, int i);
25334 void (*tlb_flush)(struct drm_device *, int engine);
25335-};
25336+} __no_const;
25337
25338 struct nouveau_instmem_engine {
25339 void *priv;
25340@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
25341 struct nouveau_mc_engine {
25342 int (*init)(struct drm_device *dev);
25343 void (*takedown)(struct drm_device *dev);
25344-};
25345+} __no_const;
25346
25347 struct nouveau_timer_engine {
25348 int (*init)(struct drm_device *dev);
25349 void (*takedown)(struct drm_device *dev);
25350 uint64_t (*read)(struct drm_device *dev);
25351-};
25352+} __no_const;
25353
25354 struct nouveau_fb_engine {
25355 int num_tiles;
25356@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
25357 void (*put)(struct drm_device *, struct nouveau_mem **);
25358
25359 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
25360-};
25361+} __no_const;
25362
25363 struct nouveau_engine {
25364 struct nouveau_instmem_engine instmem;
25365@@ -640,7 +640,7 @@ struct drm_nouveau_private {
25366 struct drm_global_reference mem_global_ref;
25367 struct ttm_bo_global_ref bo_global_ref;
25368 struct ttm_bo_device bdev;
25369- atomic_t validate_sequence;
25370+ atomic_unchecked_t validate_sequence;
25371 } ttm;
25372
25373 struct {
25374diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
25375--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
25376+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
25377@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
25378 if (USE_REFCNT(dev))
25379 sequence = nvchan_rd32(chan, 0x48);
25380 else
25381- sequence = atomic_read(&chan->fence.last_sequence_irq);
25382+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
25383
25384 if (chan->fence.sequence_ack == sequence)
25385 goto out;
25386@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
25387
25388 INIT_LIST_HEAD(&chan->fence.pending);
25389 spin_lock_init(&chan->fence.lock);
25390- atomic_set(&chan->fence.last_sequence_irq, 0);
25391+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
25392 return 0;
25393 }
25394
25395diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
25396--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
25397+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
25398@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
25399 int trycnt = 0;
25400 int ret, i;
25401
25402- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
25403+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
25404 retry:
25405 if (++trycnt > 100000) {
25406 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
25407diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
25408--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
25409+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
25410@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
25411 bool can_switch;
25412
25413 spin_lock(&dev->count_lock);
25414- can_switch = (dev->open_count == 0);
25415+ can_switch = (local_read(&dev->open_count) == 0);
25416 spin_unlock(&dev->count_lock);
25417 return can_switch;
25418 }
25419diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
25420--- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
25421+++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
25422@@ -560,7 +560,7 @@ static int
25423 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
25424 u32 class, u32 mthd, u32 data)
25425 {
25426- atomic_set(&chan->fence.last_sequence_irq, data);
25427+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
25428 return 0;
25429 }
25430
25431diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
25432--- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
25433+++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
25434@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
25435
25436 /* GH: Simple idle check.
25437 */
25438- atomic_set(&dev_priv->idle_count, 0);
25439+ atomic_set_unchecked(&dev_priv->idle_count, 0);
25440
25441 /* We don't support anything other than bus-mastering ring mode,
25442 * but the ring can be in either AGP or PCI space for the ring
25443diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
25444--- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
25445+++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
25446@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
25447 int is_pci;
25448 unsigned long cce_buffers_offset;
25449
25450- atomic_t idle_count;
25451+ atomic_unchecked_t idle_count;
25452
25453 int page_flipping;
25454 int current_page;
25455 u32 crtc_offset;
25456 u32 crtc_offset_cntl;
25457
25458- atomic_t vbl_received;
25459+ atomic_unchecked_t vbl_received;
25460
25461 u32 color_fmt;
25462 unsigned int front_offset;
25463diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
25464--- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
25465+++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
25466@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
25467 if (crtc != 0)
25468 return 0;
25469
25470- return atomic_read(&dev_priv->vbl_received);
25471+ return atomic_read_unchecked(&dev_priv->vbl_received);
25472 }
25473
25474 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
25475@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
25476 /* VBLANK interrupt */
25477 if (status & R128_CRTC_VBLANK_INT) {
25478 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
25479- atomic_inc(&dev_priv->vbl_received);
25480+ atomic_inc_unchecked(&dev_priv->vbl_received);
25481 drm_handle_vblank(dev, 0);
25482 return IRQ_HANDLED;
25483 }
25484diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
25485--- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
25486+++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
25487@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25488
25489 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25490 {
25491- if (atomic_read(&dev_priv->idle_count) == 0)
25492+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25493 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25494 else
25495- atomic_set(&dev_priv->idle_count, 0);
25496+ atomic_set_unchecked(&dev_priv->idle_count, 0);
25497 }
25498
25499 #endif
25500diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
25501--- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
25502+++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
25503@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25504 char name[512];
25505 int i;
25506
25507+ pax_track_stack();
25508+
25509 ctx->card = card;
25510 ctx->bios = bios;
25511
25512diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
25513--- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
25514+++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
25515@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25516 regex_t mask_rex;
25517 regmatch_t match[4];
25518 char buf[1024];
25519- size_t end;
25520+ long end;
25521 int len;
25522 int done = 0;
25523 int r;
25524 unsigned o;
25525 struct offset *offset;
25526 char last_reg_s[10];
25527- int last_reg;
25528+ unsigned long last_reg;
25529
25530 if (regcomp
25531 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25532diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
25533--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
25534+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
25535@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25536 struct radeon_gpio_rec gpio;
25537 struct radeon_hpd hpd;
25538
25539+ pax_track_stack();
25540+
25541 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25542 return false;
25543
25544diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
25545--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
25546+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
25547@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
25548 bool can_switch;
25549
25550 spin_lock(&dev->count_lock);
25551- can_switch = (dev->open_count == 0);
25552+ can_switch = (local_read(&dev->open_count) == 0);
25553 spin_unlock(&dev->count_lock);
25554 return can_switch;
25555 }
25556diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25557--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
25558+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25559@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25560 uint32_t post_div;
25561 u32 pll_out_min, pll_out_max;
25562
25563+ pax_track_stack();
25564+
25565 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25566 freq = freq * 1000;
25567
25568diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25569--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25570+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25571@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25572
25573 /* SW interrupt */
25574 wait_queue_head_t swi_queue;
25575- atomic_t swi_emitted;
25576+ atomic_unchecked_t swi_emitted;
25577 int vblank_crtc;
25578 uint32_t irq_enable_reg;
25579 uint32_t r500_disp_irq_reg;
25580diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25581--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25582+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25583@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25584 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25585 return 0;
25586 }
25587- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25588+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25589 if (!rdev->cp.ready)
25590 /* FIXME: cp is not running assume everythings is done right
25591 * away
25592@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25593 return r;
25594 }
25595 radeon_fence_write(rdev, 0);
25596- atomic_set(&rdev->fence_drv.seq, 0);
25597+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25598 INIT_LIST_HEAD(&rdev->fence_drv.created);
25599 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25600 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25601diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25602--- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25603+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25604@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25605 */
25606 struct radeon_fence_driver {
25607 uint32_t scratch_reg;
25608- atomic_t seq;
25609+ atomic_unchecked_t seq;
25610 uint32_t last_seq;
25611 unsigned long last_jiffies;
25612 unsigned long last_timeout;
25613@@ -960,7 +960,7 @@ struct radeon_asic {
25614 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25615 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25616 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25617-};
25618+} __no_const;
25619
25620 /*
25621 * Asic structures
25622diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25623--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25624+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25625@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25626 request = compat_alloc_user_space(sizeof(*request));
25627 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25628 || __put_user(req32.param, &request->param)
25629- || __put_user((void __user *)(unsigned long)req32.value,
25630+ || __put_user((unsigned long)req32.value,
25631 &request->value))
25632 return -EFAULT;
25633
25634diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25635--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25636+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25637@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25638 unsigned int ret;
25639 RING_LOCALS;
25640
25641- atomic_inc(&dev_priv->swi_emitted);
25642- ret = atomic_read(&dev_priv->swi_emitted);
25643+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25644+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25645
25646 BEGIN_RING(4);
25647 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25648@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25649 drm_radeon_private_t *dev_priv =
25650 (drm_radeon_private_t *) dev->dev_private;
25651
25652- atomic_set(&dev_priv->swi_emitted, 0);
25653+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25654 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25655
25656 dev->max_vblank_count = 0x001fffff;
25657diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25658--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25659+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25660@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25661 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25662 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25663
25664- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25665+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25666 sarea_priv->nbox * sizeof(depth_boxes[0])))
25667 return -EFAULT;
25668
25669@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25670 {
25671 drm_radeon_private_t *dev_priv = dev->dev_private;
25672 drm_radeon_getparam_t *param = data;
25673- int value;
25674+ int value = 0;
25675
25676 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25677
25678diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25679--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25680+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25681@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25682 }
25683 if (unlikely(ttm_vm_ops == NULL)) {
25684 ttm_vm_ops = vma->vm_ops;
25685- radeon_ttm_vm_ops = *ttm_vm_ops;
25686- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25687+ pax_open_kernel();
25688+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25689+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25690+ pax_close_kernel();
25691 }
25692 vma->vm_ops = &radeon_ttm_vm_ops;
25693 return 0;
25694diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25695--- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25696+++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25697@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25698 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25699 rdev->pm.sideport_bandwidth.full)
25700 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25701- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25702+ read_delay_latency.full = dfixed_const(800 * 1000);
25703 read_delay_latency.full = dfixed_div(read_delay_latency,
25704 rdev->pm.igp_sideport_mclk);
25705+ a.full = dfixed_const(370);
25706+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25707 } else {
25708 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25709 rdev->pm.k8_bandwidth.full)
25710diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25711--- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25712+++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25713@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25714 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25715 struct shrink_control *sc)
25716 {
25717- static atomic_t start_pool = ATOMIC_INIT(0);
25718+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25719 unsigned i;
25720- unsigned pool_offset = atomic_add_return(1, &start_pool);
25721+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25722 struct ttm_page_pool *pool;
25723 int shrink_pages = sc->nr_to_scan;
25724
25725diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25726--- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25727+++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25728@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25729 typedef uint32_t maskarray_t[5];
25730
25731 typedef struct drm_via_irq {
25732- atomic_t irq_received;
25733+ atomic_unchecked_t irq_received;
25734 uint32_t pending_mask;
25735 uint32_t enable_mask;
25736 wait_queue_head_t irq_queue;
25737@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25738 struct timeval last_vblank;
25739 int last_vblank_valid;
25740 unsigned usec_per_vblank;
25741- atomic_t vbl_received;
25742+ atomic_unchecked_t vbl_received;
25743 drm_via_state_t hc_state;
25744 char pci_buf[VIA_PCI_BUF_SIZE];
25745 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25746diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25747--- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25748+++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25749@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25750 if (crtc != 0)
25751 return 0;
25752
25753- return atomic_read(&dev_priv->vbl_received);
25754+ return atomic_read_unchecked(&dev_priv->vbl_received);
25755 }
25756
25757 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25758@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25759
25760 status = VIA_READ(VIA_REG_INTERRUPT);
25761 if (status & VIA_IRQ_VBLANK_PENDING) {
25762- atomic_inc(&dev_priv->vbl_received);
25763- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25764+ atomic_inc_unchecked(&dev_priv->vbl_received);
25765+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25766 do_gettimeofday(&cur_vblank);
25767 if (dev_priv->last_vblank_valid) {
25768 dev_priv->usec_per_vblank =
25769@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25770 dev_priv->last_vblank = cur_vblank;
25771 dev_priv->last_vblank_valid = 1;
25772 }
25773- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25774+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25775 DRM_DEBUG("US per vblank is: %u\n",
25776 dev_priv->usec_per_vblank);
25777 }
25778@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25779
25780 for (i = 0; i < dev_priv->num_irqs; ++i) {
25781 if (status & cur_irq->pending_mask) {
25782- atomic_inc(&cur_irq->irq_received);
25783+ atomic_inc_unchecked(&cur_irq->irq_received);
25784 DRM_WAKEUP(&cur_irq->irq_queue);
25785 handled = 1;
25786 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25787@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25788 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25789 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25790 masks[irq][4]));
25791- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25792+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25793 } else {
25794 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25795 (((cur_irq_sequence =
25796- atomic_read(&cur_irq->irq_received)) -
25797+ atomic_read_unchecked(&cur_irq->irq_received)) -
25798 *sequence) <= (1 << 23)));
25799 }
25800 *sequence = cur_irq_sequence;
25801@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25802 }
25803
25804 for (i = 0; i < dev_priv->num_irqs; ++i) {
25805- atomic_set(&cur_irq->irq_received, 0);
25806+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25807 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25808 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25809 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25810@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25811 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25812 case VIA_IRQ_RELATIVE:
25813 irqwait->request.sequence +=
25814- atomic_read(&cur_irq->irq_received);
25815+ atomic_read_unchecked(&cur_irq->irq_received);
25816 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25817 case VIA_IRQ_ABSOLUTE:
25818 break;
25819diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25820--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25821+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25822@@ -240,7 +240,7 @@ struct vmw_private {
25823 * Fencing and IRQs.
25824 */
25825
25826- atomic_t fence_seq;
25827+ atomic_unchecked_t fence_seq;
25828 wait_queue_head_t fence_queue;
25829 wait_queue_head_t fifo_queue;
25830 atomic_t fence_queue_waiters;
25831diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25832--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25833+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25834@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25835 while (!vmw_lag_lt(queue, us)) {
25836 spin_lock(&queue->lock);
25837 if (list_empty(&queue->head))
25838- sequence = atomic_read(&dev_priv->fence_seq);
25839+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25840 else {
25841 fence = list_first_entry(&queue->head,
25842 struct vmw_fence, head);
25843diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25844--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25845+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25846@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25847 (unsigned int) min,
25848 (unsigned int) fifo->capabilities);
25849
25850- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25851+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25852 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25853 vmw_fence_queue_init(&fifo->fence_queue);
25854 return vmw_fifo_send_fence(dev_priv, &dummy);
25855@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25856
25857 fm = vmw_fifo_reserve(dev_priv, bytes);
25858 if (unlikely(fm == NULL)) {
25859- *sequence = atomic_read(&dev_priv->fence_seq);
25860+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25861 ret = -ENOMEM;
25862 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25863 false, 3*HZ);
25864@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25865 }
25866
25867 do {
25868- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25869+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25870 } while (*sequence == 0);
25871
25872 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25873diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25874--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25875+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25876@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25877 * emitted. Then the fence is stale and signaled.
25878 */
25879
25880- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25881+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25882 > VMW_FENCE_WRAP);
25883
25884 return ret;
25885@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25886
25887 if (fifo_idle)
25888 down_read(&fifo_state->rwsem);
25889- signal_seq = atomic_read(&dev_priv->fence_seq);
25890+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25891 ret = 0;
25892
25893 for (;;) {
25894diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25895--- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25896+++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25897@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25898
25899 int hid_add_device(struct hid_device *hdev)
25900 {
25901- static atomic_t id = ATOMIC_INIT(0);
25902+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25903 int ret;
25904
25905 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25906@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25907 /* XXX hack, any other cleaner solution after the driver core
25908 * is converted to allow more than 20 bytes as the device name? */
25909 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25910- hdev->vendor, hdev->product, atomic_inc_return(&id));
25911+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25912
25913 hid_debug_register(hdev, dev_name(&hdev->dev));
25914 ret = device_add(&hdev->dev);
25915diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25916--- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25917+++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25918@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25919 break;
25920
25921 case HIDIOCAPPLICATION:
25922- if (arg < 0 || arg >= hid->maxapplication)
25923+ if (arg >= hid->maxapplication)
25924 break;
25925
25926 for (i = 0; i < hid->maxcollection; i++)
25927diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25928--- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25929+++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25930@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25931 return res;
25932
25933 temp /= 1000;
25934- if (temp < 0)
25935- return -EINVAL;
25936
25937 mutex_lock(&resource->lock);
25938 resource->trip[attr->index - 7] = temp;
25939diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25940--- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25941+++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25942@@ -166,7 +166,7 @@ struct sht15_data {
25943 int supply_uV;
25944 bool supply_uV_valid;
25945 struct work_struct update_supply_work;
25946- atomic_t interrupt_handled;
25947+ atomic_unchecked_t interrupt_handled;
25948 };
25949
25950 /**
25951@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25952 return ret;
25953
25954 gpio_direction_input(data->pdata->gpio_data);
25955- atomic_set(&data->interrupt_handled, 0);
25956+ atomic_set_unchecked(&data->interrupt_handled, 0);
25957
25958 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25959 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25960 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25961 /* Only relevant if the interrupt hasn't occurred. */
25962- if (!atomic_read(&data->interrupt_handled))
25963+ if (!atomic_read_unchecked(&data->interrupt_handled))
25964 schedule_work(&data->read_work);
25965 }
25966 ret = wait_event_timeout(data->wait_queue,
25967@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25968
25969 /* First disable the interrupt */
25970 disable_irq_nosync(irq);
25971- atomic_inc(&data->interrupt_handled);
25972+ atomic_inc_unchecked(&data->interrupt_handled);
25973 /* Then schedule a reading work struct */
25974 if (data->state != SHT15_READING_NOTHING)
25975 schedule_work(&data->read_work);
25976@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25977 * If not, then start the interrupt again - care here as could
25978 * have gone low in meantime so verify it hasn't!
25979 */
25980- atomic_set(&data->interrupt_handled, 0);
25981+ atomic_set_unchecked(&data->interrupt_handled, 0);
25982 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25983 /* If still not occurred or another handler has been scheduled */
25984 if (gpio_get_value(data->pdata->gpio_data)
25985- || atomic_read(&data->interrupt_handled))
25986+ || atomic_read_unchecked(&data->interrupt_handled))
25987 return;
25988 }
25989
25990diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25991--- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25992+++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25993@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25994 struct i2c_board_info *info);
25995 static int w83791d_remove(struct i2c_client *client);
25996
25997-static int w83791d_read(struct i2c_client *client, u8 register);
25998-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25999+static int w83791d_read(struct i2c_client *client, u8 reg);
26000+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
26001 static struct w83791d_data *w83791d_update_device(struct device *dev);
26002
26003 #ifdef DEBUG
26004diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
26005--- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
26006+++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
26007@@ -43,7 +43,7 @@
26008 extern struct i2c_adapter amd756_smbus;
26009
26010 static struct i2c_adapter *s4882_adapter;
26011-static struct i2c_algorithm *s4882_algo;
26012+static i2c_algorithm_no_const *s4882_algo;
26013
26014 /* Wrapper access functions for multiplexed SMBus */
26015 static DEFINE_MUTEX(amd756_lock);
26016diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
26017--- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
26018+++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
26019@@ -41,7 +41,7 @@
26020 extern struct i2c_adapter *nforce2_smbus;
26021
26022 static struct i2c_adapter *s4985_adapter;
26023-static struct i2c_algorithm *s4985_algo;
26024+static i2c_algorithm_no_const *s4985_algo;
26025
26026 /* Wrapper access functions for multiplexed SMBus */
26027 static DEFINE_MUTEX(nforce2_lock);
26028diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
26029--- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
26030+++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
26031@@ -28,7 +28,7 @@
26032 /* multiplexer per channel data */
26033 struct i2c_mux_priv {
26034 struct i2c_adapter adap;
26035- struct i2c_algorithm algo;
26036+ i2c_algorithm_no_const algo;
26037
26038 struct i2c_adapter *parent;
26039 void *mux_dev; /* the mux chip/device */
26040diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
26041--- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
26042+++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
26043@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
26044 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
26045 if ((unsigned long)buf & alignment
26046 || blk_rq_bytes(rq) & q->dma_pad_mask
26047- || object_is_on_stack(buf))
26048+ || object_starts_on_stack(buf))
26049 drive->dma = 0;
26050 }
26051 }
26052diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
26053--- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
26054+++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
26055@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
26056 u8 pc_buf[256], header_len, desc_cnt;
26057 int i, rc = 1, blocks, length;
26058
26059+ pax_track_stack();
26060+
26061 ide_debug_log(IDE_DBG_FUNC, "enter");
26062
26063 drive->bios_cyl = 0;
26064diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
26065--- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
26066+++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
26067@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
26068 int ret, i, n_ports = dev2 ? 4 : 2;
26069 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
26070
26071+ pax_track_stack();
26072+
26073 for (i = 0; i < n_ports / 2; i++) {
26074 ret = ide_setup_pci_controller(pdev[i], d, !i);
26075 if (ret < 0)
26076diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
26077--- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
26078+++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
26079@@ -113,7 +113,7 @@ static char const counter_group_names[CM
26080
26081 struct cm_counter_group {
26082 struct kobject obj;
26083- atomic_long_t counter[CM_ATTR_COUNT];
26084+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
26085 };
26086
26087 struct cm_counter_attribute {
26088@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
26089 struct ib_mad_send_buf *msg = NULL;
26090 int ret;
26091
26092- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26093+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26094 counter[CM_REQ_COUNTER]);
26095
26096 /* Quick state check to discard duplicate REQs. */
26097@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
26098 if (!cm_id_priv)
26099 return;
26100
26101- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26102+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26103 counter[CM_REP_COUNTER]);
26104 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
26105 if (ret)
26106@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
26107 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
26108 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
26109 spin_unlock_irq(&cm_id_priv->lock);
26110- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26111+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26112 counter[CM_RTU_COUNTER]);
26113 goto out;
26114 }
26115@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
26116 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
26117 dreq_msg->local_comm_id);
26118 if (!cm_id_priv) {
26119- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26120+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26121 counter[CM_DREQ_COUNTER]);
26122 cm_issue_drep(work->port, work->mad_recv_wc);
26123 return -EINVAL;
26124@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
26125 case IB_CM_MRA_REP_RCVD:
26126 break;
26127 case IB_CM_TIMEWAIT:
26128- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26129+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26130 counter[CM_DREQ_COUNTER]);
26131 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26132 goto unlock;
26133@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
26134 cm_free_msg(msg);
26135 goto deref;
26136 case IB_CM_DREQ_RCVD:
26137- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26138+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26139 counter[CM_DREQ_COUNTER]);
26140 goto unlock;
26141 default:
26142@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
26143 ib_modify_mad(cm_id_priv->av.port->mad_agent,
26144 cm_id_priv->msg, timeout)) {
26145 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
26146- atomic_long_inc(&work->port->
26147+ atomic_long_inc_unchecked(&work->port->
26148 counter_group[CM_RECV_DUPLICATES].
26149 counter[CM_MRA_COUNTER]);
26150 goto out;
26151@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
26152 break;
26153 case IB_CM_MRA_REQ_RCVD:
26154 case IB_CM_MRA_REP_RCVD:
26155- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26156+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26157 counter[CM_MRA_COUNTER]);
26158 /* fall through */
26159 default:
26160@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
26161 case IB_CM_LAP_IDLE:
26162 break;
26163 case IB_CM_MRA_LAP_SENT:
26164- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26165+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26166 counter[CM_LAP_COUNTER]);
26167 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26168 goto unlock;
26169@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
26170 cm_free_msg(msg);
26171 goto deref;
26172 case IB_CM_LAP_RCVD:
26173- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26174+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26175 counter[CM_LAP_COUNTER]);
26176 goto unlock;
26177 default:
26178@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
26179 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
26180 if (cur_cm_id_priv) {
26181 spin_unlock_irq(&cm.lock);
26182- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26183+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26184 counter[CM_SIDR_REQ_COUNTER]);
26185 goto out; /* Duplicate message. */
26186 }
26187@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
26188 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
26189 msg->retries = 1;
26190
26191- atomic_long_add(1 + msg->retries,
26192+ atomic_long_add_unchecked(1 + msg->retries,
26193 &port->counter_group[CM_XMIT].counter[attr_index]);
26194 if (msg->retries)
26195- atomic_long_add(msg->retries,
26196+ atomic_long_add_unchecked(msg->retries,
26197 &port->counter_group[CM_XMIT_RETRIES].
26198 counter[attr_index]);
26199
26200@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
26201 }
26202
26203 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
26204- atomic_long_inc(&port->counter_group[CM_RECV].
26205+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
26206 counter[attr_id - CM_ATTR_ID_OFFSET]);
26207
26208 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
26209@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
26210 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
26211
26212 return sprintf(buf, "%ld\n",
26213- atomic_long_read(&group->counter[cm_attr->index]));
26214+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
26215 }
26216
26217 static const struct sysfs_ops cm_counter_ops = {
26218diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
26219--- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
26220+++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
26221@@ -97,8 +97,8 @@ struct ib_fmr_pool {
26222
26223 struct task_struct *thread;
26224
26225- atomic_t req_ser;
26226- atomic_t flush_ser;
26227+ atomic_unchecked_t req_ser;
26228+ atomic_unchecked_t flush_ser;
26229
26230 wait_queue_head_t force_wait;
26231 };
26232@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
26233 struct ib_fmr_pool *pool = pool_ptr;
26234
26235 do {
26236- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
26237+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
26238 ib_fmr_batch_release(pool);
26239
26240- atomic_inc(&pool->flush_ser);
26241+ atomic_inc_unchecked(&pool->flush_ser);
26242 wake_up_interruptible(&pool->force_wait);
26243
26244 if (pool->flush_function)
26245@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
26246 }
26247
26248 set_current_state(TASK_INTERRUPTIBLE);
26249- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
26250+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
26251 !kthread_should_stop())
26252 schedule();
26253 __set_current_state(TASK_RUNNING);
26254@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
26255 pool->dirty_watermark = params->dirty_watermark;
26256 pool->dirty_len = 0;
26257 spin_lock_init(&pool->pool_lock);
26258- atomic_set(&pool->req_ser, 0);
26259- atomic_set(&pool->flush_ser, 0);
26260+ atomic_set_unchecked(&pool->req_ser, 0);
26261+ atomic_set_unchecked(&pool->flush_ser, 0);
26262 init_waitqueue_head(&pool->force_wait);
26263
26264 pool->thread = kthread_run(ib_fmr_cleanup_thread,
26265@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
26266 }
26267 spin_unlock_irq(&pool->pool_lock);
26268
26269- serial = atomic_inc_return(&pool->req_ser);
26270+ serial = atomic_inc_return_unchecked(&pool->req_ser);
26271 wake_up_process(pool->thread);
26272
26273 if (wait_event_interruptible(pool->force_wait,
26274- atomic_read(&pool->flush_ser) - serial >= 0))
26275+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
26276 return -EINTR;
26277
26278 return 0;
26279@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
26280 } else {
26281 list_add_tail(&fmr->list, &pool->dirty_list);
26282 if (++pool->dirty_len >= pool->dirty_watermark) {
26283- atomic_inc(&pool->req_ser);
26284+ atomic_inc_unchecked(&pool->req_ser);
26285 wake_up_process(pool->thread);
26286 }
26287 }
26288diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
26289--- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
26290+++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
26291@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
26292 int err;
26293 struct fw_ri_tpte tpt;
26294 u32 stag_idx;
26295- static atomic_t key;
26296+ static atomic_unchecked_t key;
26297
26298 if (c4iw_fatal_error(rdev))
26299 return -EIO;
26300@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
26301 &rdev->resource.tpt_fifo_lock);
26302 if (!stag_idx)
26303 return -ENOMEM;
26304- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
26305+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
26306 }
26307 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
26308 __func__, stag_state, type, pdid, stag_idx);
26309diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
26310--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
26311+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
26312@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
26313 struct infinipath_counters counters;
26314 struct ipath_devdata *dd;
26315
26316+ pax_track_stack();
26317+
26318 dd = file->f_path.dentry->d_inode->i_private;
26319 dd->ipath_f_read_counters(dd, &counters);
26320
26321diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
26322--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
26323+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
26324@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
26325 struct ib_atomic_eth *ateth;
26326 struct ipath_ack_entry *e;
26327 u64 vaddr;
26328- atomic64_t *maddr;
26329+ atomic64_unchecked_t *maddr;
26330 u64 sdata;
26331 u32 rkey;
26332 u8 next;
26333@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
26334 IB_ACCESS_REMOTE_ATOMIC)))
26335 goto nack_acc_unlck;
26336 /* Perform atomic OP and save result. */
26337- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
26338+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
26339 sdata = be64_to_cpu(ateth->swap_data);
26340 e = &qp->s_ack_queue[qp->r_head_ack_queue];
26341 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
26342- (u64) atomic64_add_return(sdata, maddr) - sdata :
26343+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
26344 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
26345 be64_to_cpu(ateth->compare_data),
26346 sdata);
26347diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
26348--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
26349+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
26350@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
26351 unsigned long flags;
26352 struct ib_wc wc;
26353 u64 sdata;
26354- atomic64_t *maddr;
26355+ atomic64_unchecked_t *maddr;
26356 enum ib_wc_status send_status;
26357
26358 /*
26359@@ -382,11 +382,11 @@ again:
26360 IB_ACCESS_REMOTE_ATOMIC)))
26361 goto acc_err;
26362 /* Perform atomic OP and save result. */
26363- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
26364+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
26365 sdata = wqe->wr.wr.atomic.compare_add;
26366 *(u64 *) sqp->s_sge.sge.vaddr =
26367 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
26368- (u64) atomic64_add_return(sdata, maddr) - sdata :
26369+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
26370 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
26371 sdata, wqe->wr.wr.atomic.swap);
26372 goto send_comp;
26373diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
26374--- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
26375+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
26376@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
26377 LIST_HEAD(nes_adapter_list);
26378 static LIST_HEAD(nes_dev_list);
26379
26380-atomic_t qps_destroyed;
26381+atomic_unchecked_t qps_destroyed;
26382
26383 static unsigned int ee_flsh_adapter;
26384 static unsigned int sysfs_nonidx_addr;
26385@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
26386 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
26387 struct nes_adapter *nesadapter = nesdev->nesadapter;
26388
26389- atomic_inc(&qps_destroyed);
26390+ atomic_inc_unchecked(&qps_destroyed);
26391
26392 /* Free the control structures */
26393
26394diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
26395--- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
26396+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
26397@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
26398 u32 cm_packets_retrans;
26399 u32 cm_packets_created;
26400 u32 cm_packets_received;
26401-atomic_t cm_listens_created;
26402-atomic_t cm_listens_destroyed;
26403+atomic_unchecked_t cm_listens_created;
26404+atomic_unchecked_t cm_listens_destroyed;
26405 u32 cm_backlog_drops;
26406-atomic_t cm_loopbacks;
26407-atomic_t cm_nodes_created;
26408-atomic_t cm_nodes_destroyed;
26409-atomic_t cm_accel_dropped_pkts;
26410-atomic_t cm_resets_recvd;
26411+atomic_unchecked_t cm_loopbacks;
26412+atomic_unchecked_t cm_nodes_created;
26413+atomic_unchecked_t cm_nodes_destroyed;
26414+atomic_unchecked_t cm_accel_dropped_pkts;
26415+atomic_unchecked_t cm_resets_recvd;
26416
26417 static inline int mini_cm_accelerated(struct nes_cm_core *,
26418 struct nes_cm_node *);
26419@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
26420
26421 static struct nes_cm_core *g_cm_core;
26422
26423-atomic_t cm_connects;
26424-atomic_t cm_accepts;
26425-atomic_t cm_disconnects;
26426-atomic_t cm_closes;
26427-atomic_t cm_connecteds;
26428-atomic_t cm_connect_reqs;
26429-atomic_t cm_rejects;
26430+atomic_unchecked_t cm_connects;
26431+atomic_unchecked_t cm_accepts;
26432+atomic_unchecked_t cm_disconnects;
26433+atomic_unchecked_t cm_closes;
26434+atomic_unchecked_t cm_connecteds;
26435+atomic_unchecked_t cm_connect_reqs;
26436+atomic_unchecked_t cm_rejects;
26437
26438
26439 /**
26440@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
26441 kfree(listener);
26442 listener = NULL;
26443 ret = 0;
26444- atomic_inc(&cm_listens_destroyed);
26445+ atomic_inc_unchecked(&cm_listens_destroyed);
26446 } else {
26447 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
26448 }
26449@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
26450 cm_node->rem_mac);
26451
26452 add_hte_node(cm_core, cm_node);
26453- atomic_inc(&cm_nodes_created);
26454+ atomic_inc_unchecked(&cm_nodes_created);
26455
26456 return cm_node;
26457 }
26458@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
26459 }
26460
26461 atomic_dec(&cm_core->node_cnt);
26462- atomic_inc(&cm_nodes_destroyed);
26463+ atomic_inc_unchecked(&cm_nodes_destroyed);
26464 nesqp = cm_node->nesqp;
26465 if (nesqp) {
26466 nesqp->cm_node = NULL;
26467@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
26468
26469 static void drop_packet(struct sk_buff *skb)
26470 {
26471- atomic_inc(&cm_accel_dropped_pkts);
26472+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
26473 dev_kfree_skb_any(skb);
26474 }
26475
26476@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
26477 {
26478
26479 int reset = 0; /* whether to send reset in case of err.. */
26480- atomic_inc(&cm_resets_recvd);
26481+ atomic_inc_unchecked(&cm_resets_recvd);
26482 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
26483 " refcnt=%d\n", cm_node, cm_node->state,
26484 atomic_read(&cm_node->ref_count));
26485@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
26486 rem_ref_cm_node(cm_node->cm_core, cm_node);
26487 return NULL;
26488 }
26489- atomic_inc(&cm_loopbacks);
26490+ atomic_inc_unchecked(&cm_loopbacks);
26491 loopbackremotenode->loopbackpartner = cm_node;
26492 loopbackremotenode->tcp_cntxt.rcv_wscale =
26493 NES_CM_DEFAULT_RCV_WND_SCALE;
26494@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26495 add_ref_cm_node(cm_node);
26496 } else if (cm_node->state == NES_CM_STATE_TSA) {
26497 rem_ref_cm_node(cm_core, cm_node);
26498- atomic_inc(&cm_accel_dropped_pkts);
26499+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
26500 dev_kfree_skb_any(skb);
26501 break;
26502 }
26503@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26504
26505 if ((cm_id) && (cm_id->event_handler)) {
26506 if (issue_disconn) {
26507- atomic_inc(&cm_disconnects);
26508+ atomic_inc_unchecked(&cm_disconnects);
26509 cm_event.event = IW_CM_EVENT_DISCONNECT;
26510 cm_event.status = disconn_status;
26511 cm_event.local_addr = cm_id->local_addr;
26512@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26513 }
26514
26515 if (issue_close) {
26516- atomic_inc(&cm_closes);
26517+ atomic_inc_unchecked(&cm_closes);
26518 nes_disconnect(nesqp, 1);
26519
26520 cm_id->provider_data = nesqp;
26521@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26522
26523 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26524 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26525- atomic_inc(&cm_accepts);
26526+ atomic_inc_unchecked(&cm_accepts);
26527
26528 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26529 netdev_refcnt_read(nesvnic->netdev));
26530@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26531
26532 struct nes_cm_core *cm_core;
26533
26534- atomic_inc(&cm_rejects);
26535+ atomic_inc_unchecked(&cm_rejects);
26536 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26537 loopback = cm_node->loopbackpartner;
26538 cm_core = cm_node->cm_core;
26539@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26540 ntohl(cm_id->local_addr.sin_addr.s_addr),
26541 ntohs(cm_id->local_addr.sin_port));
26542
26543- atomic_inc(&cm_connects);
26544+ atomic_inc_unchecked(&cm_connects);
26545 nesqp->active_conn = 1;
26546
26547 /* cache the cm_id in the qp */
26548@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26549 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26550 return err;
26551 }
26552- atomic_inc(&cm_listens_created);
26553+ atomic_inc_unchecked(&cm_listens_created);
26554 }
26555
26556 cm_id->add_ref(cm_id);
26557@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26558 if (nesqp->destroyed) {
26559 return;
26560 }
26561- atomic_inc(&cm_connecteds);
26562+ atomic_inc_unchecked(&cm_connecteds);
26563 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26564 " local port 0x%04X. jiffies = %lu.\n",
26565 nesqp->hwqp.qp_id,
26566@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26567
26568 cm_id->add_ref(cm_id);
26569 ret = cm_id->event_handler(cm_id, &cm_event);
26570- atomic_inc(&cm_closes);
26571+ atomic_inc_unchecked(&cm_closes);
26572 cm_event.event = IW_CM_EVENT_CLOSE;
26573 cm_event.status = 0;
26574 cm_event.provider_data = cm_id->provider_data;
26575@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26576 return;
26577 cm_id = cm_node->cm_id;
26578
26579- atomic_inc(&cm_connect_reqs);
26580+ atomic_inc_unchecked(&cm_connect_reqs);
26581 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26582 cm_node, cm_id, jiffies);
26583
26584@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26585 return;
26586 cm_id = cm_node->cm_id;
26587
26588- atomic_inc(&cm_connect_reqs);
26589+ atomic_inc_unchecked(&cm_connect_reqs);
26590 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26591 cm_node, cm_id, jiffies);
26592
26593diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26594--- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26595+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26596@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26597 extern unsigned int wqm_quanta;
26598 extern struct list_head nes_adapter_list;
26599
26600-extern atomic_t cm_connects;
26601-extern atomic_t cm_accepts;
26602-extern atomic_t cm_disconnects;
26603-extern atomic_t cm_closes;
26604-extern atomic_t cm_connecteds;
26605-extern atomic_t cm_connect_reqs;
26606-extern atomic_t cm_rejects;
26607-extern atomic_t mod_qp_timouts;
26608-extern atomic_t qps_created;
26609-extern atomic_t qps_destroyed;
26610-extern atomic_t sw_qps_destroyed;
26611+extern atomic_unchecked_t cm_connects;
26612+extern atomic_unchecked_t cm_accepts;
26613+extern atomic_unchecked_t cm_disconnects;
26614+extern atomic_unchecked_t cm_closes;
26615+extern atomic_unchecked_t cm_connecteds;
26616+extern atomic_unchecked_t cm_connect_reqs;
26617+extern atomic_unchecked_t cm_rejects;
26618+extern atomic_unchecked_t mod_qp_timouts;
26619+extern atomic_unchecked_t qps_created;
26620+extern atomic_unchecked_t qps_destroyed;
26621+extern atomic_unchecked_t sw_qps_destroyed;
26622 extern u32 mh_detected;
26623 extern u32 mh_pauses_sent;
26624 extern u32 cm_packets_sent;
26625@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26626 extern u32 cm_packets_received;
26627 extern u32 cm_packets_dropped;
26628 extern u32 cm_packets_retrans;
26629-extern atomic_t cm_listens_created;
26630-extern atomic_t cm_listens_destroyed;
26631+extern atomic_unchecked_t cm_listens_created;
26632+extern atomic_unchecked_t cm_listens_destroyed;
26633 extern u32 cm_backlog_drops;
26634-extern atomic_t cm_loopbacks;
26635-extern atomic_t cm_nodes_created;
26636-extern atomic_t cm_nodes_destroyed;
26637-extern atomic_t cm_accel_dropped_pkts;
26638-extern atomic_t cm_resets_recvd;
26639+extern atomic_unchecked_t cm_loopbacks;
26640+extern atomic_unchecked_t cm_nodes_created;
26641+extern atomic_unchecked_t cm_nodes_destroyed;
26642+extern atomic_unchecked_t cm_accel_dropped_pkts;
26643+extern atomic_unchecked_t cm_resets_recvd;
26644
26645 extern u32 int_mod_timer_init;
26646 extern u32 int_mod_cq_depth_256;
26647diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26648--- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26649+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26650@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26651 target_stat_values[++index] = mh_detected;
26652 target_stat_values[++index] = mh_pauses_sent;
26653 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26654- target_stat_values[++index] = atomic_read(&cm_connects);
26655- target_stat_values[++index] = atomic_read(&cm_accepts);
26656- target_stat_values[++index] = atomic_read(&cm_disconnects);
26657- target_stat_values[++index] = atomic_read(&cm_connecteds);
26658- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26659- target_stat_values[++index] = atomic_read(&cm_rejects);
26660- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26661- target_stat_values[++index] = atomic_read(&qps_created);
26662- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26663- target_stat_values[++index] = atomic_read(&qps_destroyed);
26664- target_stat_values[++index] = atomic_read(&cm_closes);
26665+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26666+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26667+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26668+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26669+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26670+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26671+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26672+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26673+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26674+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26675+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26676 target_stat_values[++index] = cm_packets_sent;
26677 target_stat_values[++index] = cm_packets_bounced;
26678 target_stat_values[++index] = cm_packets_created;
26679 target_stat_values[++index] = cm_packets_received;
26680 target_stat_values[++index] = cm_packets_dropped;
26681 target_stat_values[++index] = cm_packets_retrans;
26682- target_stat_values[++index] = atomic_read(&cm_listens_created);
26683- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26684+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26685+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26686 target_stat_values[++index] = cm_backlog_drops;
26687- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26688- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26689- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26690- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26691- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26692+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26693+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26694+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26695+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26696+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26697 target_stat_values[++index] = nesadapter->free_4kpbl;
26698 target_stat_values[++index] = nesadapter->free_256pbl;
26699 target_stat_values[++index] = int_mod_timer_init;
26700diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26701--- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26702+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26703@@ -46,9 +46,9 @@
26704
26705 #include <rdma/ib_umem.h>
26706
26707-atomic_t mod_qp_timouts;
26708-atomic_t qps_created;
26709-atomic_t sw_qps_destroyed;
26710+atomic_unchecked_t mod_qp_timouts;
26711+atomic_unchecked_t qps_created;
26712+atomic_unchecked_t sw_qps_destroyed;
26713
26714 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26715
26716@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26717 if (init_attr->create_flags)
26718 return ERR_PTR(-EINVAL);
26719
26720- atomic_inc(&qps_created);
26721+ atomic_inc_unchecked(&qps_created);
26722 switch (init_attr->qp_type) {
26723 case IB_QPT_RC:
26724 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26725@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26726 struct iw_cm_event cm_event;
26727 int ret;
26728
26729- atomic_inc(&sw_qps_destroyed);
26730+ atomic_inc_unchecked(&sw_qps_destroyed);
26731 nesqp->destroyed = 1;
26732
26733 /* Blow away the connection if it exists. */
26734diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26735--- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26736+++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26737@@ -51,6 +51,7 @@
26738 #include <linux/completion.h>
26739 #include <linux/kref.h>
26740 #include <linux/sched.h>
26741+#include <linux/slab.h>
26742
26743 #include "qib_common.h"
26744 #include "qib_verbs.h"
26745diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26746--- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26747+++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26748@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26749 */
26750 static void gameport_init_port(struct gameport *gameport)
26751 {
26752- static atomic_t gameport_no = ATOMIC_INIT(0);
26753+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26754
26755 __module_get(THIS_MODULE);
26756
26757 mutex_init(&gameport->drv_mutex);
26758 device_initialize(&gameport->dev);
26759 dev_set_name(&gameport->dev, "gameport%lu",
26760- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26761+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26762 gameport->dev.bus = &gameport_bus;
26763 gameport->dev.release = gameport_release_port;
26764 if (gameport->parent)
26765diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26766--- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26767+++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26768@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26769 */
26770 int input_register_device(struct input_dev *dev)
26771 {
26772- static atomic_t input_no = ATOMIC_INIT(0);
26773+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26774 struct input_handler *handler;
26775 const char *path;
26776 int error;
26777@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26778 dev->setkeycode = input_default_setkeycode;
26779
26780 dev_set_name(&dev->dev, "input%ld",
26781- (unsigned long) atomic_inc_return(&input_no) - 1);
26782+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26783
26784 error = device_add(&dev->dev);
26785 if (error)
26786diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26787--- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26788+++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26789@@ -30,6 +30,7 @@
26790 #include <linux/kernel.h>
26791 #include <linux/module.h>
26792 #include <linux/slab.h>
26793+#include <linux/sched.h>
26794 #include <linux/init.h>
26795 #include <linux/input.h>
26796 #include <linux/gameport.h>
26797@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26798 unsigned char buf[SW_LENGTH];
26799 int i;
26800
26801+ pax_track_stack();
26802+
26803 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26804
26805 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26806diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26807--- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26808+++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26809@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26810
26811 static int xpad_led_probe(struct usb_xpad *xpad)
26812 {
26813- static atomic_t led_seq = ATOMIC_INIT(0);
26814+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26815 long led_no;
26816 struct xpad_led *led;
26817 struct led_classdev *led_cdev;
26818@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26819 if (!led)
26820 return -ENOMEM;
26821
26822- led_no = (long)atomic_inc_return(&led_seq) - 1;
26823+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26824
26825 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26826 led->xpad = xpad;
26827diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26828--- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26829+++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26830@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26831
26832 spin_unlock_irq(&client->packet_lock);
26833
26834- if (copy_to_user(buffer, data, count))
26835+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26836 return -EFAULT;
26837
26838 return count;
26839diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26840--- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26841+++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26842@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26843 */
26844 static void serio_init_port(struct serio *serio)
26845 {
26846- static atomic_t serio_no = ATOMIC_INIT(0);
26847+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26848
26849 __module_get(THIS_MODULE);
26850
26851@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26852 mutex_init(&serio->drv_mutex);
26853 device_initialize(&serio->dev);
26854 dev_set_name(&serio->dev, "serio%ld",
26855- (long)atomic_inc_return(&serio_no) - 1);
26856+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26857 serio->dev.bus = &serio_bus;
26858 serio->dev.release = serio_release_port;
26859 serio->dev.groups = serio_device_attr_groups;
26860diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26861--- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26862+++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26863@@ -83,8 +83,8 @@ struct capiminor {
26864
26865 struct capi20_appl *ap;
26866 u32 ncci;
26867- atomic_t datahandle;
26868- atomic_t msgid;
26869+ atomic_unchecked_t datahandle;
26870+ atomic_unchecked_t msgid;
26871
26872 struct tty_port port;
26873 int ttyinstop;
26874@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26875 capimsg_setu16(s, 2, mp->ap->applid);
26876 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26877 capimsg_setu8 (s, 5, CAPI_RESP);
26878- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26879+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26880 capimsg_setu32(s, 8, mp->ncci);
26881 capimsg_setu16(s, 12, datahandle);
26882 }
26883@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26884 mp->outbytes -= len;
26885 spin_unlock_bh(&mp->outlock);
26886
26887- datahandle = atomic_inc_return(&mp->datahandle);
26888+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26889 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26890 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26891 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26892 capimsg_setu16(skb->data, 2, mp->ap->applid);
26893 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26894 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26895- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26896+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26897 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26898 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26899 capimsg_setu16(skb->data, 16, len); /* Data length */
26900diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26901--- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26902+++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26903@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26904 cs->commands_pending = 0;
26905 cs->cur_at_seq = 0;
26906 cs->gotfwver = -1;
26907- cs->open_count = 0;
26908+ local_set(&cs->open_count, 0);
26909 cs->dev = NULL;
26910 cs->tty = NULL;
26911 cs->tty_dev = NULL;
26912diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26913--- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26914+++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26915@@ -35,6 +35,7 @@
26916 #include <linux/tty_driver.h>
26917 #include <linux/list.h>
26918 #include <asm/atomic.h>
26919+#include <asm/local.h>
26920
26921 #define GIG_VERSION {0, 5, 0, 0}
26922 #define GIG_COMPAT {0, 4, 0, 0}
26923@@ -433,7 +434,7 @@ struct cardstate {
26924 spinlock_t cmdlock;
26925 unsigned curlen, cmdbytes;
26926
26927- unsigned open_count;
26928+ local_t open_count;
26929 struct tty_struct *tty;
26930 struct tasklet_struct if_wake_tasklet;
26931 unsigned control_state;
26932diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26933--- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26934+++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26935@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26936 }
26937 tty->driver_data = cs;
26938
26939- ++cs->open_count;
26940-
26941- if (cs->open_count == 1) {
26942+ if (local_inc_return(&cs->open_count) == 1) {
26943 spin_lock_irqsave(&cs->lock, flags);
26944 cs->tty = tty;
26945 spin_unlock_irqrestore(&cs->lock, flags);
26946@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26947
26948 if (!cs->connected)
26949 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26950- else if (!cs->open_count)
26951+ else if (!local_read(&cs->open_count))
26952 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26953 else {
26954- if (!--cs->open_count) {
26955+ if (!local_dec_return(&cs->open_count)) {
26956 spin_lock_irqsave(&cs->lock, flags);
26957 cs->tty = NULL;
26958 spin_unlock_irqrestore(&cs->lock, flags);
26959@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26960 if (!cs->connected) {
26961 gig_dbg(DEBUG_IF, "not connected");
26962 retval = -ENODEV;
26963- } else if (!cs->open_count)
26964+ } else if (!local_read(&cs->open_count))
26965 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26966 else {
26967 retval = 0;
26968@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26969 retval = -ENODEV;
26970 goto done;
26971 }
26972- if (!cs->open_count) {
26973+ if (!local_read(&cs->open_count)) {
26974 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26975 retval = -ENODEV;
26976 goto done;
26977@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26978 if (!cs->connected) {
26979 gig_dbg(DEBUG_IF, "not connected");
26980 retval = -ENODEV;
26981- } else if (!cs->open_count)
26982+ } else if (!local_read(&cs->open_count))
26983 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26984 else if (cs->mstate != MS_LOCKED) {
26985 dev_warn(cs->dev, "can't write to unlocked device\n");
26986@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26987
26988 if (!cs->connected)
26989 gig_dbg(DEBUG_IF, "not connected");
26990- else if (!cs->open_count)
26991+ else if (!local_read(&cs->open_count))
26992 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26993 else if (cs->mstate != MS_LOCKED)
26994 dev_warn(cs->dev, "can't write to unlocked device\n");
26995@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26996
26997 if (!cs->connected)
26998 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26999- else if (!cs->open_count)
27000+ else if (!local_read(&cs->open_count))
27001 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27002 else
27003 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27004@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
27005
27006 if (!cs->connected)
27007 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27008- else if (!cs->open_count)
27009+ else if (!local_read(&cs->open_count))
27010 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27011 else
27012 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27013@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
27014 goto out;
27015 }
27016
27017- if (!cs->open_count) {
27018+ if (!local_read(&cs->open_count)) {
27019 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27020 goto out;
27021 }
27022diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
27023--- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
27024+++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
27025@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
27026 }
27027 if (left) {
27028 if (t4file->user) {
27029- if (copy_from_user(buf, dp, left))
27030+ if (left > sizeof buf || copy_from_user(buf, dp, left))
27031 return -EFAULT;
27032 } else {
27033 memcpy(buf, dp, left);
27034@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
27035 }
27036 if (left) {
27037 if (config->user) {
27038- if (copy_from_user(buf, dp, left))
27039+ if (left > sizeof buf || copy_from_user(buf, dp, left))
27040 return -EFAULT;
27041 } else {
27042 memcpy(buf, dp, left);
27043diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
27044--- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
27045+++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
27046@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
27047 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
27048 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
27049
27050+ pax_track_stack();
27051
27052 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
27053 {
27054diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
27055--- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
27056+++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
27057@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
27058 IDI_SYNC_REQ req;
27059 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27060
27061+ pax_track_stack();
27062+
27063 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27064
27065 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27066diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
27067--- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
27068+++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
27069@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
27070 IDI_SYNC_REQ req;
27071 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27072
27073+ pax_track_stack();
27074+
27075 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27076
27077 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27078diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
27079--- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
27080+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
27081@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
27082 IDI_SYNC_REQ req;
27083 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27084
27085+ pax_track_stack();
27086+
27087 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27088
27089 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27090diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
27091--- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
27092+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
27093@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
27094 } diva_didd_add_adapter_t;
27095 typedef struct _diva_didd_remove_adapter {
27096 IDI_CALL p_request;
27097-} diva_didd_remove_adapter_t;
27098+} __no_const diva_didd_remove_adapter_t;
27099 typedef struct _diva_didd_read_adapter_array {
27100 void * buffer;
27101 dword length;
27102diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
27103--- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
27104+++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
27105@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
27106 IDI_SYNC_REQ req;
27107 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27108
27109+ pax_track_stack();
27110+
27111 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27112
27113 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27114diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
27115--- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
27116+++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
27117@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
27118 dword d;
27119 word w;
27120
27121+ pax_track_stack();
27122+
27123 a = plci->adapter;
27124 Id = ((word)plci->Id<<8)|a->Id;
27125 PUT_WORD(&SS_Ind[4],0x0000);
27126@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
27127 word j, n, w;
27128 dword d;
27129
27130+ pax_track_stack();
27131+
27132
27133 for(i=0;i<8;i++) bp_parms[i].length = 0;
27134 for(i=0;i<2;i++) global_config[i].length = 0;
27135@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
27136 const byte llc3[] = {4,3,2,2,6,6,0};
27137 const byte header[] = {0,2,3,3,0,0,0};
27138
27139+ pax_track_stack();
27140+
27141 for(i=0;i<8;i++) bp_parms[i].length = 0;
27142 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
27143 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
27144@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
27145 word appl_number_group_type[MAX_APPL];
27146 PLCI *auxplci;
27147
27148+ pax_track_stack();
27149+
27150 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
27151
27152 if(!a->group_optimization_enabled)
27153diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
27154--- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
27155+++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
27156@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
27157 IDI_SYNC_REQ req;
27158 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27159
27160+ pax_track_stack();
27161+
27162 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27163
27164 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27165diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
27166--- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
27167+++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
27168@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
27169 typedef struct _diva_os_idi_adapter_interface {
27170 diva_init_card_proc_t cleanup_adapter_proc;
27171 diva_cmd_card_proc_t cmd_proc;
27172-} diva_os_idi_adapter_interface_t;
27173+} __no_const diva_os_idi_adapter_interface_t;
27174
27175 typedef struct _diva_os_xdi_adapter {
27176 struct list_head link;
27177diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
27178--- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
27179+++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
27180@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
27181 } iocpar;
27182 void __user *argp = (void __user *)arg;
27183
27184+ pax_track_stack();
27185+
27186 #define name iocpar.name
27187 #define bname iocpar.bname
27188 #define iocts iocpar.iocts
27189diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
27190--- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
27191+++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
27192@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
27193 if (count > len)
27194 count = len;
27195 if (user) {
27196- if (copy_from_user(msg, buf, count))
27197+ if (count > sizeof msg || copy_from_user(msg, buf, count))
27198 return -EFAULT;
27199 } else
27200 memcpy(msg, buf, count);
27201diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
27202--- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
27203+++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
27204@@ -92,9 +92,17 @@ static __init int map_switcher(void)
27205 * it's worked so far. The end address needs +1 because __get_vm_area
27206 * allocates an extra guard page, so we need space for that.
27207 */
27208+
27209+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27210+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27211+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
27212+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27213+#else
27214 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27215 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
27216 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27217+#endif
27218+
27219 if (!switcher_vma) {
27220 err = -ENOMEM;
27221 printk("lguest: could not map switcher pages high\n");
27222@@ -119,7 +127,7 @@ static __init int map_switcher(void)
27223 * Now the Switcher is mapped at the right address, we can't fail!
27224 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
27225 */
27226- memcpy(switcher_vma->addr, start_switcher_text,
27227+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
27228 end_switcher_text - start_switcher_text);
27229
27230 printk(KERN_INFO "lguest: mapped switcher at %p\n",
27231diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
27232--- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
27233+++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
27234@@ -59,7 +59,7 @@ static struct {
27235 /* Offset from where switcher.S was compiled to where we've copied it */
27236 static unsigned long switcher_offset(void)
27237 {
27238- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
27239+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
27240 }
27241
27242 /* This cpu's struct lguest_pages. */
27243@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
27244 * These copies are pretty cheap, so we do them unconditionally: */
27245 /* Save the current Host top-level page directory.
27246 */
27247+
27248+#ifdef CONFIG_PAX_PER_CPU_PGD
27249+ pages->state.host_cr3 = read_cr3();
27250+#else
27251 pages->state.host_cr3 = __pa(current->mm->pgd);
27252+#endif
27253+
27254 /*
27255 * Set up the Guest's page tables to see this CPU's pages (and no
27256 * other CPU's pages).
27257@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
27258 * compiled-in switcher code and the high-mapped copy we just made.
27259 */
27260 for (i = 0; i < IDT_ENTRIES; i++)
27261- default_idt_entries[i] += switcher_offset();
27262+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
27263
27264 /*
27265 * Set up the Switcher's per-cpu areas.
27266@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
27267 * it will be undisturbed when we switch. To change %cs and jump we
27268 * need this structure to feed to Intel's "lcall" instruction.
27269 */
27270- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
27271+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
27272 lguest_entry.segment = LGUEST_CS;
27273
27274 /*
27275diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
27276--- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
27277+++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
27278@@ -87,6 +87,7 @@
27279 #include <asm/page.h>
27280 #include <asm/segment.h>
27281 #include <asm/lguest.h>
27282+#include <asm/processor-flags.h>
27283
27284 // We mark the start of the code to copy
27285 // It's placed in .text tho it's never run here
27286@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
27287 // Changes type when we load it: damn Intel!
27288 // For after we switch over our page tables
27289 // That entry will be read-only: we'd crash.
27290+
27291+#ifdef CONFIG_PAX_KERNEXEC
27292+ mov %cr0, %edx
27293+ xor $X86_CR0_WP, %edx
27294+ mov %edx, %cr0
27295+#endif
27296+
27297 movl $(GDT_ENTRY_TSS*8), %edx
27298 ltr %dx
27299
27300@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
27301 // Let's clear it again for our return.
27302 // The GDT descriptor of the Host
27303 // Points to the table after two "size" bytes
27304- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
27305+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
27306 // Clear "used" from type field (byte 5, bit 2)
27307- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
27308+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
27309+
27310+#ifdef CONFIG_PAX_KERNEXEC
27311+ mov %cr0, %eax
27312+ xor $X86_CR0_WP, %eax
27313+ mov %eax, %cr0
27314+#endif
27315
27316 // Once our page table's switched, the Guest is live!
27317 // The Host fades as we run this final step.
27318@@ -295,13 +309,12 @@ deliver_to_host:
27319 // I consulted gcc, and it gave
27320 // These instructions, which I gladly credit:
27321 leal (%edx,%ebx,8), %eax
27322- movzwl (%eax),%edx
27323- movl 4(%eax), %eax
27324- xorw %ax, %ax
27325- orl %eax, %edx
27326+ movl 4(%eax), %edx
27327+ movw (%eax), %dx
27328 // Now the address of the handler's in %edx
27329 // We call it now: its "iret" drops us home.
27330- jmp *%edx
27331+ ljmp $__KERNEL_CS, $1f
27332+1: jmp *%edx
27333
27334 // Every interrupt can come to us here
27335 // But we must truly tell each apart.
27336diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
27337--- linux-3.0.4/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
27338+++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
27339@@ -164,9 +164,9 @@ struct mapped_device {
27340 /*
27341 * Event handling.
27342 */
27343- atomic_t event_nr;
27344+ atomic_unchecked_t event_nr;
27345 wait_queue_head_t eventq;
27346- atomic_t uevent_seq;
27347+ atomic_unchecked_t uevent_seq;
27348 struct list_head uevent_list;
27349 spinlock_t uevent_lock; /* Protect access to uevent_list */
27350
27351@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
27352 rwlock_init(&md->map_lock);
27353 atomic_set(&md->holders, 1);
27354 atomic_set(&md->open_count, 0);
27355- atomic_set(&md->event_nr, 0);
27356- atomic_set(&md->uevent_seq, 0);
27357+ atomic_set_unchecked(&md->event_nr, 0);
27358+ atomic_set_unchecked(&md->uevent_seq, 0);
27359 INIT_LIST_HEAD(&md->uevent_list);
27360 spin_lock_init(&md->uevent_lock);
27361
27362@@ -1977,7 +1977,7 @@ static void event_callback(void *context
27363
27364 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
27365
27366- atomic_inc(&md->event_nr);
27367+ atomic_inc_unchecked(&md->event_nr);
27368 wake_up(&md->eventq);
27369 }
27370
27371@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
27372
27373 uint32_t dm_next_uevent_seq(struct mapped_device *md)
27374 {
27375- return atomic_add_return(1, &md->uevent_seq);
27376+ return atomic_add_return_unchecked(1, &md->uevent_seq);
27377 }
27378
27379 uint32_t dm_get_event_nr(struct mapped_device *md)
27380 {
27381- return atomic_read(&md->event_nr);
27382+ return atomic_read_unchecked(&md->event_nr);
27383 }
27384
27385 int dm_wait_event(struct mapped_device *md, int event_nr)
27386 {
27387 return wait_event_interruptible(md->eventq,
27388- (event_nr != atomic_read(&md->event_nr)));
27389+ (event_nr != atomic_read_unchecked(&md->event_nr)));
27390 }
27391
27392 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
27393diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
27394--- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
27395+++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
27396@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
27397 cmd == DM_LIST_VERSIONS_CMD)
27398 return 0;
27399
27400- if ((cmd == DM_DEV_CREATE_CMD)) {
27401+ if (cmd == DM_DEV_CREATE_CMD) {
27402 if (!*param->name) {
27403 DMWARN("name not supplied when creating device");
27404 return -EINVAL;
27405diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
27406--- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
27407+++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
27408@@ -40,7 +40,7 @@ enum dm_raid1_error {
27409
27410 struct mirror {
27411 struct mirror_set *ms;
27412- atomic_t error_count;
27413+ atomic_unchecked_t error_count;
27414 unsigned long error_type;
27415 struct dm_dev *dev;
27416 sector_t offset;
27417@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
27418 struct mirror *m;
27419
27420 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
27421- if (!atomic_read(&m->error_count))
27422+ if (!atomic_read_unchecked(&m->error_count))
27423 return m;
27424
27425 return NULL;
27426@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
27427 * simple way to tell if a device has encountered
27428 * errors.
27429 */
27430- atomic_inc(&m->error_count);
27431+ atomic_inc_unchecked(&m->error_count);
27432
27433 if (test_and_set_bit(error_type, &m->error_type))
27434 return;
27435@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
27436 struct mirror *m = get_default_mirror(ms);
27437
27438 do {
27439- if (likely(!atomic_read(&m->error_count)))
27440+ if (likely(!atomic_read_unchecked(&m->error_count)))
27441 return m;
27442
27443 if (m-- == ms->mirror)
27444@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
27445 {
27446 struct mirror *default_mirror = get_default_mirror(m->ms);
27447
27448- return !atomic_read(&default_mirror->error_count);
27449+ return !atomic_read_unchecked(&default_mirror->error_count);
27450 }
27451
27452 static int mirror_available(struct mirror_set *ms, struct bio *bio)
27453@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
27454 */
27455 if (likely(region_in_sync(ms, region, 1)))
27456 m = choose_mirror(ms, bio->bi_sector);
27457- else if (m && atomic_read(&m->error_count))
27458+ else if (m && atomic_read_unchecked(&m->error_count))
27459 m = NULL;
27460
27461 if (likely(m))
27462@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
27463 }
27464
27465 ms->mirror[mirror].ms = ms;
27466- atomic_set(&(ms->mirror[mirror].error_count), 0);
27467+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
27468 ms->mirror[mirror].error_type = 0;
27469 ms->mirror[mirror].offset = offset;
27470
27471@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
27472 */
27473 static char device_status_char(struct mirror *m)
27474 {
27475- if (!atomic_read(&(m->error_count)))
27476+ if (!atomic_read_unchecked(&(m->error_count)))
27477 return 'A';
27478
27479 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
27480diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
27481--- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
27482+++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
27483@@ -20,7 +20,7 @@ struct stripe {
27484 struct dm_dev *dev;
27485 sector_t physical_start;
27486
27487- atomic_t error_count;
27488+ atomic_unchecked_t error_count;
27489 };
27490
27491 struct stripe_c {
27492@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27493 kfree(sc);
27494 return r;
27495 }
27496- atomic_set(&(sc->stripe[i].error_count), 0);
27497+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27498 }
27499
27500 ti->private = sc;
27501@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27502 DMEMIT("%d ", sc->stripes);
27503 for (i = 0; i < sc->stripes; i++) {
27504 DMEMIT("%s ", sc->stripe[i].dev->name);
27505- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27506+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27507 'D' : 'A';
27508 }
27509 buffer[i] = '\0';
27510@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27511 */
27512 for (i = 0; i < sc->stripes; i++)
27513 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27514- atomic_inc(&(sc->stripe[i].error_count));
27515- if (atomic_read(&(sc->stripe[i].error_count)) <
27516+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
27517+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27518 DM_IO_ERROR_THRESHOLD)
27519 schedule_work(&sc->trigger_event);
27520 }
27521diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
27522--- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
27523+++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
27524@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27525 if (!dev_size)
27526 return 0;
27527
27528- if ((start >= dev_size) || (start + len > dev_size)) {
27529+ if ((start >= dev_size) || (len > dev_size - start)) {
27530 DMWARN("%s: %s too small for target: "
27531 "start=%llu, len=%llu, dev_size=%llu",
27532 dm_device_name(ti->table->md), bdevname(bdev, b),
27533diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
27534--- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
27535+++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
27536@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27537 * start build, activate spare
27538 */
27539 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27540-static atomic_t md_event_count;
27541+static atomic_unchecked_t md_event_count;
27542 void md_new_event(mddev_t *mddev)
27543 {
27544- atomic_inc(&md_event_count);
27545+ atomic_inc_unchecked(&md_event_count);
27546 wake_up(&md_event_waiters);
27547 }
27548 EXPORT_SYMBOL_GPL(md_new_event);
27549@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27550 */
27551 static void md_new_event_inintr(mddev_t *mddev)
27552 {
27553- atomic_inc(&md_event_count);
27554+ atomic_inc_unchecked(&md_event_count);
27555 wake_up(&md_event_waiters);
27556 }
27557
27558@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27559
27560 rdev->preferred_minor = 0xffff;
27561 rdev->data_offset = le64_to_cpu(sb->data_offset);
27562- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27563+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27564
27565 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27566 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27567@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27568 else
27569 sb->resync_offset = cpu_to_le64(0);
27570
27571- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27572+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27573
27574 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27575 sb->size = cpu_to_le64(mddev->dev_sectors);
27576@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27577 static ssize_t
27578 errors_show(mdk_rdev_t *rdev, char *page)
27579 {
27580- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27581+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27582 }
27583
27584 static ssize_t
27585@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27586 char *e;
27587 unsigned long n = simple_strtoul(buf, &e, 10);
27588 if (*buf && (*e == 0 || *e == '\n')) {
27589- atomic_set(&rdev->corrected_errors, n);
27590+ atomic_set_unchecked(&rdev->corrected_errors, n);
27591 return len;
27592 }
27593 return -EINVAL;
27594@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27595 rdev->last_read_error.tv_sec = 0;
27596 rdev->last_read_error.tv_nsec = 0;
27597 atomic_set(&rdev->nr_pending, 0);
27598- atomic_set(&rdev->read_errors, 0);
27599- atomic_set(&rdev->corrected_errors, 0);
27600+ atomic_set_unchecked(&rdev->read_errors, 0);
27601+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27602
27603 INIT_LIST_HEAD(&rdev->same_set);
27604 init_waitqueue_head(&rdev->blocked_wait);
27605@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27606
27607 spin_unlock(&pers_lock);
27608 seq_printf(seq, "\n");
27609- mi->event = atomic_read(&md_event_count);
27610+ mi->event = atomic_read_unchecked(&md_event_count);
27611 return 0;
27612 }
27613 if (v == (void*)2) {
27614@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27615 chunk_kb ? "KB" : "B");
27616 if (bitmap->file) {
27617 seq_printf(seq, ", file: ");
27618- seq_path(seq, &bitmap->file->f_path, " \t\n");
27619+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27620 }
27621
27622 seq_printf(seq, "\n");
27623@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27624 else {
27625 struct seq_file *p = file->private_data;
27626 p->private = mi;
27627- mi->event = atomic_read(&md_event_count);
27628+ mi->event = atomic_read_unchecked(&md_event_count);
27629 }
27630 return error;
27631 }
27632@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27633 /* always allow read */
27634 mask = POLLIN | POLLRDNORM;
27635
27636- if (mi->event != atomic_read(&md_event_count))
27637+ if (mi->event != atomic_read_unchecked(&md_event_count))
27638 mask |= POLLERR | POLLPRI;
27639 return mask;
27640 }
27641@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27642 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27643 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27644 (int)part_stat_read(&disk->part0, sectors[1]) -
27645- atomic_read(&disk->sync_io);
27646+ atomic_read_unchecked(&disk->sync_io);
27647 /* sync IO will cause sync_io to increase before the disk_stats
27648 * as sync_io is counted when a request starts, and
27649 * disk_stats is counted when it completes.
27650diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27651--- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27652+++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27653@@ -97,13 +97,13 @@ struct mdk_rdev_s
27654 * only maintained for arrays that
27655 * support hot removal
27656 */
27657- atomic_t read_errors; /* number of consecutive read errors that
27658+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27659 * we have tried to ignore.
27660 */
27661 struct timespec last_read_error; /* monotonic time since our
27662 * last read error
27663 */
27664- atomic_t corrected_errors; /* number of corrected read errors,
27665+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27666 * for reporting to userspace and storing
27667 * in superblock.
27668 */
27669@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27670
27671 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27672 {
27673- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27674+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27675 }
27676
27677 struct mdk_personality
27678diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27679--- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27680+++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27681@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27682 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27683 set_bit(R10BIO_Uptodate, &r10_bio->state);
27684 else {
27685- atomic_add(r10_bio->sectors,
27686+ atomic_add_unchecked(r10_bio->sectors,
27687 &conf->mirrors[d].rdev->corrected_errors);
27688 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27689 md_error(r10_bio->mddev,
27690@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27691 {
27692 struct timespec cur_time_mon;
27693 unsigned long hours_since_last;
27694- unsigned int read_errors = atomic_read(&rdev->read_errors);
27695+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27696
27697 ktime_get_ts(&cur_time_mon);
27698
27699@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27700 * overflowing the shift of read_errors by hours_since_last.
27701 */
27702 if (hours_since_last >= 8 * sizeof(read_errors))
27703- atomic_set(&rdev->read_errors, 0);
27704+ atomic_set_unchecked(&rdev->read_errors, 0);
27705 else
27706- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27707+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27708 }
27709
27710 /*
27711@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27712 return;
27713
27714 check_decay_read_errors(mddev, rdev);
27715- atomic_inc(&rdev->read_errors);
27716- if (atomic_read(&rdev->read_errors) > max_read_errors) {
27717+ atomic_inc_unchecked(&rdev->read_errors);
27718+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27719 char b[BDEVNAME_SIZE];
27720 bdevname(rdev->bdev, b);
27721
27722@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27723 "md/raid10:%s: %s: Raid device exceeded "
27724 "read_error threshold [cur %d:max %d]\n",
27725 mdname(mddev), b,
27726- atomic_read(&rdev->read_errors), max_read_errors);
27727+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27728 printk(KERN_NOTICE
27729 "md/raid10:%s: %s: Failing raid device\n",
27730 mdname(mddev), b);
27731@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27732 test_bit(In_sync, &rdev->flags)) {
27733 atomic_inc(&rdev->nr_pending);
27734 rcu_read_unlock();
27735- atomic_add(s, &rdev->corrected_errors);
27736+ atomic_add_unchecked(s, &rdev->corrected_errors);
27737 if (sync_page_io(rdev,
27738 r10_bio->devs[sl].addr +
27739 sect,
27740diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27741--- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27742+++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27743@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27744 rdev_dec_pending(rdev, mddev);
27745 md_error(mddev, rdev);
27746 } else
27747- atomic_add(s, &rdev->corrected_errors);
27748+ atomic_add_unchecked(s, &rdev->corrected_errors);
27749 }
27750 d = start;
27751 while (d != r1_bio->read_disk) {
27752@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27753 /* Well, this device is dead */
27754 md_error(mddev, rdev);
27755 else {
27756- atomic_add(s, &rdev->corrected_errors);
27757+ atomic_add_unchecked(s, &rdev->corrected_errors);
27758 printk(KERN_INFO
27759 "md/raid1:%s: read error corrected "
27760 "(%d sectors at %llu on %s)\n",
27761diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27762--- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27763+++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27764@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27765 bi->bi_next = NULL;
27766 if ((rw & WRITE) &&
27767 test_bit(R5_ReWrite, &sh->dev[i].flags))
27768- atomic_add(STRIPE_SECTORS,
27769+ atomic_add_unchecked(STRIPE_SECTORS,
27770 &rdev->corrected_errors);
27771 generic_make_request(bi);
27772 } else {
27773@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27774 clear_bit(R5_ReadError, &sh->dev[i].flags);
27775 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27776 }
27777- if (atomic_read(&conf->disks[i].rdev->read_errors))
27778- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27779+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27780+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27781 } else {
27782 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27783 int retry = 0;
27784 rdev = conf->disks[i].rdev;
27785
27786 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27787- atomic_inc(&rdev->read_errors);
27788+ atomic_inc_unchecked(&rdev->read_errors);
27789 if (conf->mddev->degraded >= conf->max_degraded)
27790 printk_rl(KERN_WARNING
27791 "md/raid:%s: read error not correctable "
27792@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27793 (unsigned long long)(sh->sector
27794 + rdev->data_offset),
27795 bdn);
27796- else if (atomic_read(&rdev->read_errors)
27797+ else if (atomic_read_unchecked(&rdev->read_errors)
27798 > conf->max_nr_stripes)
27799 printk(KERN_WARNING
27800 "md/raid:%s: Too many read errors, failing device %s.\n",
27801@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27802 sector_t r_sector;
27803 struct stripe_head sh2;
27804
27805+ pax_track_stack();
27806
27807 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27808 stripe = new_sector;
27809diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27810--- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27811+++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27812@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27813
27814 int x[32], y[32], w[32], h[32];
27815
27816+ pax_track_stack();
27817+
27818 /* clear out memory */
27819 memset(&line_list[0], 0x00, sizeof(u32)*32);
27820 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27821diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27822--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27823+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27824@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27825 u8 buf[HOST_LINK_BUF_SIZE];
27826 int i;
27827
27828+ pax_track_stack();
27829+
27830 dprintk("%s\n", __func__);
27831
27832 /* check if we have space for a link buf in the rx_buffer */
27833@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27834 unsigned long timeout;
27835 int written;
27836
27837+ pax_track_stack();
27838+
27839 dprintk("%s\n", __func__);
27840
27841 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27842diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27843--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27844+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27845@@ -68,12 +68,12 @@ struct dvb_demux_feed {
27846 union {
27847 struct dmx_ts_feed ts;
27848 struct dmx_section_feed sec;
27849- } feed;
27850+ } __no_const feed;
27851
27852 union {
27853 dmx_ts_cb ts;
27854 dmx_section_cb sec;
27855- } cb;
27856+ } __no_const cb;
27857
27858 struct dvb_demux *demux;
27859 void *priv;
27860diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27861--- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27862+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27863@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27864 const struct dvb_device *template, void *priv, int type)
27865 {
27866 struct dvb_device *dvbdev;
27867- struct file_operations *dvbdevfops;
27868+ file_operations_no_const *dvbdevfops;
27869 struct device *clsdev;
27870 int minor;
27871 int id;
27872diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27873--- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27874+++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27875@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27876 struct dib0700_adapter_state {
27877 int (*set_param_save) (struct dvb_frontend *,
27878 struct dvb_frontend_parameters *);
27879-};
27880+} __no_const;
27881
27882 static int dib7070_set_param_override(struct dvb_frontend *fe,
27883 struct dvb_frontend_parameters *fep)
27884diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27885--- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27886+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27887@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27888 if (!buf)
27889 return -ENOMEM;
27890
27891+ pax_track_stack();
27892+
27893 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27894 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27895 hx.addr, hx.len, hx.chk);
27896diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27897--- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27898+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27899@@ -97,7 +97,7 @@
27900 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27901
27902 struct dibusb_state {
27903- struct dib_fe_xfer_ops ops;
27904+ dib_fe_xfer_ops_no_const ops;
27905 int mt2060_present;
27906 u8 tuner_addr;
27907 };
27908diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27909--- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27910+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27911@@ -95,7 +95,7 @@ struct su3000_state {
27912
27913 struct s6x0_state {
27914 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27915-};
27916+} __no_const;
27917
27918 /* debug */
27919 static int dvb_usb_dw2102_debug;
27920diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27921--- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27922+++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27923@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27924 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27925 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27926
27927+ pax_track_stack();
27928
27929 data[0] = 0x8a;
27930 len_in = 1;
27931@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27932 int ret = 0, len_in;
27933 u8 data[512] = {0};
27934
27935+ pax_track_stack();
27936+
27937 data[0] = 0x0a;
27938 len_in = 1;
27939 info("FRM Firmware Cold Reset");
27940diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27941--- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27942+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27943@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27944 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27945 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27946 };
27947+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27948
27949 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27950 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27951- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27952+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27953 #else
27954 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27955 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27956diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27957--- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27958+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27959@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27960 static struct dvb_frontend_ops dib3000mb_ops;
27961
27962 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27963- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27964+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27965 {
27966 struct dib3000_state* state = NULL;
27967
27968diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27969--- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27970+++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27971@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27972 int ret = -1;
27973 int sync;
27974
27975+ pax_track_stack();
27976+
27977 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27978
27979 fcp = 3000;
27980diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27981--- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27982+++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27983@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27984 u8 tudata[585];
27985 int i;
27986
27987+ pax_track_stack();
27988+
27989 dprintk("Firmware is %zd bytes\n",fw->size);
27990
27991 /* Get eprom data */
27992diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27993--- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27994+++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27995@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27996 struct i2c_client c;
27997 u8 eedata[256];
27998
27999+ pax_track_stack();
28000+
28001 memset(&c, 0, sizeof(c));
28002 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
28003 c.adapter = &cx->i2c_adap[0];
28004diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
28005--- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
28006+++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
28007@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
28008 bool handle = false;
28009 struct ir_raw_event ir_core_event[64];
28010
28011+ pax_track_stack();
28012+
28013 do {
28014 num = 0;
28015 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
28016diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
28017--- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
28018+++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
28019@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
28020 u8 *eeprom;
28021 struct tveeprom tvdata;
28022
28023+ pax_track_stack();
28024+
28025 memset(&tvdata,0,sizeof(tvdata));
28026
28027 eeprom = pvr2_eeprom_fetch(hdw);
28028diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
28029--- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
28030+++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
28031@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
28032 unsigned char localPAT[256];
28033 unsigned char localPMT[256];
28034
28035+ pax_track_stack();
28036+
28037 /* Set video format - must be done first as it resets other settings */
28038 set_reg8(client, 0x41, h->video_format);
28039
28040diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
28041--- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
28042+++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
28043@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
28044 u8 tmp[512];
28045 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28046
28047+ pax_track_stack();
28048+
28049 /* While any outstand message on the bus exists... */
28050 do {
28051
28052@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
28053 u8 tmp[512];
28054 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28055
28056+ pax_track_stack();
28057+
28058 while (loop) {
28059
28060 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
28061diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
28062--- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
28063+++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
28064@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
28065
28066 /* Platform device functions */
28067
28068-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
28069+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
28070 .vidioc_querycap = timblogiw_querycap,
28071 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
28072 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
28073diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
28074--- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
28075+++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
28076@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
28077 unsigned char rv, gv, bv;
28078 static unsigned char *Y, *U, *V;
28079
28080+ pax_track_stack();
28081+
28082 frame = usbvision->cur_frame;
28083 image_size = frame->frmwidth * frame->frmheight;
28084 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
28085diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
28086--- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
28087+++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
28088@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
28089 {
28090 struct videobuf_queue q;
28091
28092+ pax_track_stack();
28093+
28094 /* Required to make generic handler to call __videobuf_alloc */
28095 q.int_ops = &sg_ops;
28096
28097diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
28098--- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
28099+++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
28100@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
28101 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
28102 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
28103
28104+#ifdef CONFIG_GRKERNSEC_HIDESYM
28105+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
28106+#else
28107 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
28108 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
28109+#endif
28110+
28111 /*
28112 * Rounding UP to nearest 4-kB boundary here...
28113 */
28114diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
28115--- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
28116+++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
28117@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
28118 return 0;
28119 }
28120
28121+static inline void
28122+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28123+{
28124+ if (phy_info->port_details) {
28125+ phy_info->port_details->rphy = rphy;
28126+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28127+ ioc->name, rphy));
28128+ }
28129+
28130+ if (rphy) {
28131+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28132+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28133+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28134+ ioc->name, rphy, rphy->dev.release));
28135+ }
28136+}
28137+
28138 /* no mutex */
28139 static void
28140 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
28141@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
28142 return NULL;
28143 }
28144
28145-static inline void
28146-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28147-{
28148- if (phy_info->port_details) {
28149- phy_info->port_details->rphy = rphy;
28150- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28151- ioc->name, rphy));
28152- }
28153-
28154- if (rphy) {
28155- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28156- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28157- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28158- ioc->name, rphy, rphy->dev.release));
28159- }
28160-}
28161-
28162 static inline struct sas_port *
28163 mptsas_get_port(struct mptsas_phyinfo *phy_info)
28164 {
28165diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
28166--- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
28167+++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
28168@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
28169
28170 h = shost_priv(SChost);
28171
28172- if (h) {
28173- if (h->info_kbuf == NULL)
28174- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28175- return h->info_kbuf;
28176- h->info_kbuf[0] = '\0';
28177+ if (!h)
28178+ return NULL;
28179
28180- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28181- h->info_kbuf[size-1] = '\0';
28182- }
28183+ if (h->info_kbuf == NULL)
28184+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28185+ return h->info_kbuf;
28186+ h->info_kbuf[0] = '\0';
28187+
28188+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28189+ h->info_kbuf[size-1] = '\0';
28190
28191 return h->info_kbuf;
28192 }
28193diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
28194--- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
28195+++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
28196@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
28197 struct i2o_message *msg;
28198 unsigned int iop;
28199
28200+ pax_track_stack();
28201+
28202 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
28203 return -EFAULT;
28204
28205diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
28206--- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
28207+++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
28208@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
28209 "Array Controller Device"
28210 };
28211
28212-static char *chtostr(u8 * chars, int n)
28213-{
28214- char tmp[256];
28215- tmp[0] = 0;
28216- return strncat(tmp, (char *)chars, n);
28217-}
28218-
28219 static int i2o_report_query_status(struct seq_file *seq, int block_status,
28220 char *group)
28221 {
28222@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
28223
28224 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
28225 seq_printf(seq, "%-#8x", ddm_table.module_id);
28226- seq_printf(seq, "%-29s",
28227- chtostr(ddm_table.module_name_version, 28));
28228+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
28229 seq_printf(seq, "%9d ", ddm_table.data_size);
28230 seq_printf(seq, "%8d", ddm_table.code_size);
28231
28232@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
28233
28234 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
28235 seq_printf(seq, "%-#8x", dst->module_id);
28236- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
28237- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
28238+ seq_printf(seq, "%-.28s", dst->module_name_version);
28239+ seq_printf(seq, "%-.8s", dst->date);
28240 seq_printf(seq, "%8d ", dst->module_size);
28241 seq_printf(seq, "%8d ", dst->mpb_size);
28242 seq_printf(seq, "0x%04x", dst->module_flags);
28243@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
28244 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
28245 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
28246 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
28247- seq_printf(seq, "Vendor info : %s\n",
28248- chtostr((u8 *) (work32 + 2), 16));
28249- seq_printf(seq, "Product info : %s\n",
28250- chtostr((u8 *) (work32 + 6), 16));
28251- seq_printf(seq, "Description : %s\n",
28252- chtostr((u8 *) (work32 + 10), 16));
28253- seq_printf(seq, "Product rev. : %s\n",
28254- chtostr((u8 *) (work32 + 14), 8));
28255+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
28256+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
28257+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
28258+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
28259
28260 seq_printf(seq, "Serial number : ");
28261 print_serial_number(seq, (u8 *) (work32 + 16),
28262@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
28263 }
28264
28265 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
28266- seq_printf(seq, "Module name : %s\n",
28267- chtostr(result.module_name, 24));
28268- seq_printf(seq, "Module revision : %s\n",
28269- chtostr(result.module_rev, 8));
28270+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
28271+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
28272
28273 seq_printf(seq, "Serial number : ");
28274 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
28275@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
28276 return 0;
28277 }
28278
28279- seq_printf(seq, "Device name : %s\n",
28280- chtostr(result.device_name, 64));
28281- seq_printf(seq, "Service name : %s\n",
28282- chtostr(result.service_name, 64));
28283- seq_printf(seq, "Physical name : %s\n",
28284- chtostr(result.physical_location, 64));
28285- seq_printf(seq, "Instance number : %s\n",
28286- chtostr(result.instance_number, 4));
28287+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
28288+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
28289+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
28290+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
28291
28292 return 0;
28293 }
28294diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
28295--- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
28296+++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
28297@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
28298
28299 spin_lock_irqsave(&c->context_list_lock, flags);
28300
28301- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
28302- atomic_inc(&c->context_list_counter);
28303+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
28304+ atomic_inc_unchecked(&c->context_list_counter);
28305
28306- entry->context = atomic_read(&c->context_list_counter);
28307+ entry->context = atomic_read_unchecked(&c->context_list_counter);
28308
28309 list_add(&entry->list, &c->context_list);
28310
28311@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
28312
28313 #if BITS_PER_LONG == 64
28314 spin_lock_init(&c->context_list_lock);
28315- atomic_set(&c->context_list_counter, 0);
28316+ atomic_set_unchecked(&c->context_list_counter, 0);
28317 INIT_LIST_HEAD(&c->context_list);
28318 #endif
28319
28320diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
28321--- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
28322+++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
28323@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
28324
28325 struct abx500_device_entry {
28326 struct list_head list;
28327- struct abx500_ops ops;
28328+ abx500_ops_no_const ops;
28329 struct device *dev;
28330 };
28331
28332diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
28333--- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
28334+++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
28335@@ -13,6 +13,7 @@
28336
28337 #include <linux/kernel.h>
28338 #include <linux/module.h>
28339+#include <linux/slab.h>
28340 #include <linux/init.h>
28341 #include <linux/pci.h>
28342 #include <linux/interrupt.h>
28343diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
28344--- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
28345+++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
28346@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
28347 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
28348 int ret;
28349
28350+ pax_track_stack();
28351+
28352 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
28353 return -EINVAL;
28354
28355diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
28356--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
28357+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
28358@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
28359 * the lid is closed. This leads to interrupts as soon as a little move
28360 * is done.
28361 */
28362- atomic_inc(&lis3_dev.count);
28363+ atomic_inc_unchecked(&lis3_dev.count);
28364
28365 wake_up_interruptible(&lis3_dev.misc_wait);
28366 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28367@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
28368 if (lis3_dev.pm_dev)
28369 pm_runtime_get_sync(lis3_dev.pm_dev);
28370
28371- atomic_set(&lis3_dev.count, 0);
28372+ atomic_set_unchecked(&lis3_dev.count, 0);
28373 return 0;
28374 }
28375
28376@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
28377 add_wait_queue(&lis3_dev.misc_wait, &wait);
28378 while (true) {
28379 set_current_state(TASK_INTERRUPTIBLE);
28380- data = atomic_xchg(&lis3_dev.count, 0);
28381+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28382 if (data)
28383 break;
28384
28385@@ -583,7 +583,7 @@ out:
28386 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28387 {
28388 poll_wait(file, &lis3_dev.misc_wait, wait);
28389- if (atomic_read(&lis3_dev.count))
28390+ if (atomic_read_unchecked(&lis3_dev.count))
28391 return POLLIN | POLLRDNORM;
28392 return 0;
28393 }
28394diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
28395--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
28396+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
28397@@ -265,7 +265,7 @@ struct lis3lv02d {
28398 struct input_polled_dev *idev; /* input device */
28399 struct platform_device *pdev; /* platform device */
28400 struct regulator_bulk_data regulators[2];
28401- atomic_t count; /* interrupt count after last read */
28402+ atomic_unchecked_t count; /* interrupt count after last read */
28403 union axis_conversion ac; /* hw -> logical axis */
28404 int mapped_btns[3];
28405
28406diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
28407--- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
28408+++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
28409@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
28410 unsigned long nsec;
28411
28412 nsec = CLKS2NSEC(clks);
28413- atomic_long_inc(&mcs_op_statistics[op].count);
28414- atomic_long_add(nsec, &mcs_op_statistics[op].total);
28415+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28416+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28417 if (mcs_op_statistics[op].max < nsec)
28418 mcs_op_statistics[op].max = nsec;
28419 }
28420diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
28421--- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
28422+++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
28423@@ -32,9 +32,9 @@
28424
28425 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28426
28427-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28428+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28429 {
28430- unsigned long val = atomic_long_read(v);
28431+ unsigned long val = atomic_long_read_unchecked(v);
28432
28433 seq_printf(s, "%16lu %s\n", val, id);
28434 }
28435@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28436
28437 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28438 for (op = 0; op < mcsop_last; op++) {
28439- count = atomic_long_read(&mcs_op_statistics[op].count);
28440- total = atomic_long_read(&mcs_op_statistics[op].total);
28441+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28442+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28443 max = mcs_op_statistics[op].max;
28444 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28445 count ? total / count : 0, max);
28446diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
28447--- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
28448+++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
28449@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28450 * GRU statistics.
28451 */
28452 struct gru_stats_s {
28453- atomic_long_t vdata_alloc;
28454- atomic_long_t vdata_free;
28455- atomic_long_t gts_alloc;
28456- atomic_long_t gts_free;
28457- atomic_long_t gms_alloc;
28458- atomic_long_t gms_free;
28459- atomic_long_t gts_double_allocate;
28460- atomic_long_t assign_context;
28461- atomic_long_t assign_context_failed;
28462- atomic_long_t free_context;
28463- atomic_long_t load_user_context;
28464- atomic_long_t load_kernel_context;
28465- atomic_long_t lock_kernel_context;
28466- atomic_long_t unlock_kernel_context;
28467- atomic_long_t steal_user_context;
28468- atomic_long_t steal_kernel_context;
28469- atomic_long_t steal_context_failed;
28470- atomic_long_t nopfn;
28471- atomic_long_t asid_new;
28472- atomic_long_t asid_next;
28473- atomic_long_t asid_wrap;
28474- atomic_long_t asid_reuse;
28475- atomic_long_t intr;
28476- atomic_long_t intr_cbr;
28477- atomic_long_t intr_tfh;
28478- atomic_long_t intr_spurious;
28479- atomic_long_t intr_mm_lock_failed;
28480- atomic_long_t call_os;
28481- atomic_long_t call_os_wait_queue;
28482- atomic_long_t user_flush_tlb;
28483- atomic_long_t user_unload_context;
28484- atomic_long_t user_exception;
28485- atomic_long_t set_context_option;
28486- atomic_long_t check_context_retarget_intr;
28487- atomic_long_t check_context_unload;
28488- atomic_long_t tlb_dropin;
28489- atomic_long_t tlb_preload_page;
28490- atomic_long_t tlb_dropin_fail_no_asid;
28491- atomic_long_t tlb_dropin_fail_upm;
28492- atomic_long_t tlb_dropin_fail_invalid;
28493- atomic_long_t tlb_dropin_fail_range_active;
28494- atomic_long_t tlb_dropin_fail_idle;
28495- atomic_long_t tlb_dropin_fail_fmm;
28496- atomic_long_t tlb_dropin_fail_no_exception;
28497- atomic_long_t tfh_stale_on_fault;
28498- atomic_long_t mmu_invalidate_range;
28499- atomic_long_t mmu_invalidate_page;
28500- atomic_long_t flush_tlb;
28501- atomic_long_t flush_tlb_gru;
28502- atomic_long_t flush_tlb_gru_tgh;
28503- atomic_long_t flush_tlb_gru_zero_asid;
28504-
28505- atomic_long_t copy_gpa;
28506- atomic_long_t read_gpa;
28507-
28508- atomic_long_t mesq_receive;
28509- atomic_long_t mesq_receive_none;
28510- atomic_long_t mesq_send;
28511- atomic_long_t mesq_send_failed;
28512- atomic_long_t mesq_noop;
28513- atomic_long_t mesq_send_unexpected_error;
28514- atomic_long_t mesq_send_lb_overflow;
28515- atomic_long_t mesq_send_qlimit_reached;
28516- atomic_long_t mesq_send_amo_nacked;
28517- atomic_long_t mesq_send_put_nacked;
28518- atomic_long_t mesq_page_overflow;
28519- atomic_long_t mesq_qf_locked;
28520- atomic_long_t mesq_qf_noop_not_full;
28521- atomic_long_t mesq_qf_switch_head_failed;
28522- atomic_long_t mesq_qf_unexpected_error;
28523- atomic_long_t mesq_noop_unexpected_error;
28524- atomic_long_t mesq_noop_lb_overflow;
28525- atomic_long_t mesq_noop_qlimit_reached;
28526- atomic_long_t mesq_noop_amo_nacked;
28527- atomic_long_t mesq_noop_put_nacked;
28528- atomic_long_t mesq_noop_page_overflow;
28529+ atomic_long_unchecked_t vdata_alloc;
28530+ atomic_long_unchecked_t vdata_free;
28531+ atomic_long_unchecked_t gts_alloc;
28532+ atomic_long_unchecked_t gts_free;
28533+ atomic_long_unchecked_t gms_alloc;
28534+ atomic_long_unchecked_t gms_free;
28535+ atomic_long_unchecked_t gts_double_allocate;
28536+ atomic_long_unchecked_t assign_context;
28537+ atomic_long_unchecked_t assign_context_failed;
28538+ atomic_long_unchecked_t free_context;
28539+ atomic_long_unchecked_t load_user_context;
28540+ atomic_long_unchecked_t load_kernel_context;
28541+ atomic_long_unchecked_t lock_kernel_context;
28542+ atomic_long_unchecked_t unlock_kernel_context;
28543+ atomic_long_unchecked_t steal_user_context;
28544+ atomic_long_unchecked_t steal_kernel_context;
28545+ atomic_long_unchecked_t steal_context_failed;
28546+ atomic_long_unchecked_t nopfn;
28547+ atomic_long_unchecked_t asid_new;
28548+ atomic_long_unchecked_t asid_next;
28549+ atomic_long_unchecked_t asid_wrap;
28550+ atomic_long_unchecked_t asid_reuse;
28551+ atomic_long_unchecked_t intr;
28552+ atomic_long_unchecked_t intr_cbr;
28553+ atomic_long_unchecked_t intr_tfh;
28554+ atomic_long_unchecked_t intr_spurious;
28555+ atomic_long_unchecked_t intr_mm_lock_failed;
28556+ atomic_long_unchecked_t call_os;
28557+ atomic_long_unchecked_t call_os_wait_queue;
28558+ atomic_long_unchecked_t user_flush_tlb;
28559+ atomic_long_unchecked_t user_unload_context;
28560+ atomic_long_unchecked_t user_exception;
28561+ atomic_long_unchecked_t set_context_option;
28562+ atomic_long_unchecked_t check_context_retarget_intr;
28563+ atomic_long_unchecked_t check_context_unload;
28564+ atomic_long_unchecked_t tlb_dropin;
28565+ atomic_long_unchecked_t tlb_preload_page;
28566+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28567+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28568+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28569+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28570+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28571+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28572+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28573+ atomic_long_unchecked_t tfh_stale_on_fault;
28574+ atomic_long_unchecked_t mmu_invalidate_range;
28575+ atomic_long_unchecked_t mmu_invalidate_page;
28576+ atomic_long_unchecked_t flush_tlb;
28577+ atomic_long_unchecked_t flush_tlb_gru;
28578+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28579+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28580+
28581+ atomic_long_unchecked_t copy_gpa;
28582+ atomic_long_unchecked_t read_gpa;
28583+
28584+ atomic_long_unchecked_t mesq_receive;
28585+ atomic_long_unchecked_t mesq_receive_none;
28586+ atomic_long_unchecked_t mesq_send;
28587+ atomic_long_unchecked_t mesq_send_failed;
28588+ atomic_long_unchecked_t mesq_noop;
28589+ atomic_long_unchecked_t mesq_send_unexpected_error;
28590+ atomic_long_unchecked_t mesq_send_lb_overflow;
28591+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28592+ atomic_long_unchecked_t mesq_send_amo_nacked;
28593+ atomic_long_unchecked_t mesq_send_put_nacked;
28594+ atomic_long_unchecked_t mesq_page_overflow;
28595+ atomic_long_unchecked_t mesq_qf_locked;
28596+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28597+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28598+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28599+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28600+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28601+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28602+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28603+ atomic_long_unchecked_t mesq_noop_put_nacked;
28604+ atomic_long_unchecked_t mesq_noop_page_overflow;
28605
28606 };
28607
28608@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28609 tghop_invalidate, mcsop_last};
28610
28611 struct mcs_op_statistic {
28612- atomic_long_t count;
28613- atomic_long_t total;
28614+ atomic_long_unchecked_t count;
28615+ atomic_long_unchecked_t total;
28616 unsigned long max;
28617 };
28618
28619@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28620
28621 #define STAT(id) do { \
28622 if (gru_options & OPT_STATS) \
28623- atomic_long_inc(&gru_stats.id); \
28624+ atomic_long_inc_unchecked(&gru_stats.id); \
28625 } while (0)
28626
28627 #ifdef CONFIG_SGI_GRU_DEBUG
28628diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28629--- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28630+++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28631@@ -289,7 +289,7 @@ struct xpc_interface {
28632 xpc_notify_func, void *);
28633 void (*received) (short, int, void *);
28634 enum xp_retval (*partid_to_nasids) (short, void *);
28635-};
28636+} __no_const;
28637
28638 extern struct xpc_interface xpc_interface;
28639
28640diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28641--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28642+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28643@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28644 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28645 unsigned long timeo = jiffies + HZ;
28646
28647+ pax_track_stack();
28648+
28649 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28650 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28651 goto sleep;
28652@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28653 unsigned long initial_adr;
28654 int initial_len = len;
28655
28656+ pax_track_stack();
28657+
28658 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28659 adr += chip->start;
28660 initial_adr = adr;
28661@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28662 int retries = 3;
28663 int ret;
28664
28665+ pax_track_stack();
28666+
28667 adr += chip->start;
28668
28669 retry:
28670diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28671--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28672+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28673@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28674 unsigned long cmd_addr;
28675 struct cfi_private *cfi = map->fldrv_priv;
28676
28677+ pax_track_stack();
28678+
28679 adr += chip->start;
28680
28681 /* Ensure cmd read/writes are aligned. */
28682@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28683 DECLARE_WAITQUEUE(wait, current);
28684 int wbufsize, z;
28685
28686+ pax_track_stack();
28687+
28688 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28689 if (adr & (map_bankwidth(map)-1))
28690 return -EINVAL;
28691@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28692 DECLARE_WAITQUEUE(wait, current);
28693 int ret = 0;
28694
28695+ pax_track_stack();
28696+
28697 adr += chip->start;
28698
28699 /* Let's determine this according to the interleave only once */
28700@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28701 unsigned long timeo = jiffies + HZ;
28702 DECLARE_WAITQUEUE(wait, current);
28703
28704+ pax_track_stack();
28705+
28706 adr += chip->start;
28707
28708 /* Let's determine this according to the interleave only once */
28709@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28710 unsigned long timeo = jiffies + HZ;
28711 DECLARE_WAITQUEUE(wait, current);
28712
28713+ pax_track_stack();
28714+
28715 adr += chip->start;
28716
28717 /* Let's determine this according to the interleave only once */
28718diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28719--- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28720+++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28721@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28722
28723 /* The ECC will not be calculated correctly if less than 512 is written */
28724 /* DBB-
28725- if (len != 0x200 && eccbuf)
28726+ if (len != 0x200)
28727 printk(KERN_WARNING
28728 "ECC needs a full sector write (adr: %lx size %lx)\n",
28729 (long) to, (long) len);
28730diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28731--- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28732+++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28733@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28734 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28735
28736 /* Don't allow read past end of device */
28737- if (from >= this->totlen)
28738+ if (from >= this->totlen || !len)
28739 return -EINVAL;
28740
28741 /* Don't allow a single read to cross a 512-byte block boundary */
28742diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28743--- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28744+++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28745@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28746 loff_t offset;
28747 uint16_t srcunitswap = cpu_to_le16(srcunit);
28748
28749+ pax_track_stack();
28750+
28751 eun = &part->EUNInfo[srcunit];
28752 xfer = &part->XferInfo[xferunit];
28753 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28754diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28755--- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28756+++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28757@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28758 struct inftl_oob oob;
28759 size_t retlen;
28760
28761+ pax_track_stack();
28762+
28763 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28764 "pending=%d)\n", inftl, thisVUC, pendingblock);
28765
28766diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28767--- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28768+++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28769@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28770 struct INFTLPartition *ip;
28771 size_t retlen;
28772
28773+ pax_track_stack();
28774+
28775 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28776
28777 /*
28778diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28779--- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28780+++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28781@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28782 {
28783 map_word pfow_val[4];
28784
28785+ pax_track_stack();
28786+
28787 /* Check identification string */
28788 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28789 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28790diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28791--- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28792+++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28793@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28794 u_long size;
28795 struct mtd_info_user info;
28796
28797+ pax_track_stack();
28798+
28799 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28800
28801 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28802diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28803--- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28804+++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28805@@ -26,6 +26,7 @@
28806 #include <linux/pci.h>
28807 #include <linux/mtd/mtd.h>
28808 #include <linux/module.h>
28809+#include <linux/slab.h>
28810
28811 #include "denali.h"
28812
28813diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28814--- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28815+++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28816@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28817 int inplace = 1;
28818 size_t retlen;
28819
28820+ pax_track_stack();
28821+
28822 memset(BlockMap, 0xff, sizeof(BlockMap));
28823 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28824
28825diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28826--- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28827+++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28828@@ -24,6 +24,7 @@
28829 #include <asm/errno.h>
28830 #include <linux/delay.h>
28831 #include <linux/slab.h>
28832+#include <linux/sched.h>
28833 #include <linux/mtd/mtd.h>
28834 #include <linux/mtd/nand.h>
28835 #include <linux/mtd/nftl.h>
28836@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28837 struct mtd_info *mtd = nftl->mbd.mtd;
28838 unsigned int i;
28839
28840+ pax_track_stack();
28841+
28842 /* Assume logical EraseSize == physical erasesize for starting the scan.
28843 We'll sort it out later if we find a MediaHeader which says otherwise */
28844 /* Actually, we won't. The new DiskOnChip driver has already scanned
28845diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28846--- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28847+++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28848@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28849 static int __init bytes_str_to_int(const char *str)
28850 {
28851 char *endp;
28852- unsigned long result;
28853+ unsigned long result, scale = 1;
28854
28855 result = simple_strtoul(str, &endp, 0);
28856 if (str == endp || result >= INT_MAX) {
28857@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28858
28859 switch (*endp) {
28860 case 'G':
28861- result *= 1024;
28862+ scale *= 1024;
28863 case 'M':
28864- result *= 1024;
28865+ scale *= 1024;
28866 case 'K':
28867- result *= 1024;
28868+ scale *= 1024;
28869 if (endp[1] == 'i' && endp[2] == 'B')
28870 endp += 2;
28871 case '\0':
28872@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28873 return -EINVAL;
28874 }
28875
28876- return result;
28877+ if ((intoverflow_t)result*scale >= INT_MAX) {
28878+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28879+ str);
28880+ return -EINVAL;
28881+ }
28882+
28883+ return result*scale;
28884 }
28885
28886 /**
28887diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28888--- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28889+++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28890@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28891 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28892 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28893
28894-static struct bfa_ioc_hwif nw_hwif_ct;
28895+static struct bfa_ioc_hwif nw_hwif_ct = {
28896+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28897+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28898+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28899+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28900+ .ioc_map_port = bfa_ioc_ct_map_port,
28901+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28902+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28903+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28904+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28905+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28906+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28907+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28908+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28909+};
28910
28911 /**
28912 * Called from bfa_ioc_attach() to map asic specific calls.
28913@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28914 void
28915 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28916 {
28917- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28918- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28919- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28920- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28921- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28922- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28923- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28924- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28925- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28926- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28927- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28928- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28929- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28930-
28931 ioc->ioc_hwif = &nw_hwif_ct;
28932 }
28933
28934diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28935--- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28936+++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28937@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28938 struct bna_intr_info *intr_info =
28939 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28940 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28941- struct bna_tx_event_cbfn tx_cbfn;
28942+ static struct bna_tx_event_cbfn tx_cbfn = {
28943+ /* Initialize the tx event handlers */
28944+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28945+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28946+ .tx_stall_cbfn = bnad_cb_tx_stall,
28947+ .tx_resume_cbfn = bnad_cb_tx_resume,
28948+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28949+ };
28950 struct bna_tx *tx;
28951 unsigned long flags;
28952
28953@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28954 tx_config->txq_depth = bnad->txq_depth;
28955 tx_config->tx_type = BNA_TX_T_REGULAR;
28956
28957- /* Initialize the tx event handlers */
28958- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28959- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28960- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28961- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28962- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28963-
28964 /* Get BNA's resource requirement for one tx object */
28965 spin_lock_irqsave(&bnad->bna_lock, flags);
28966 bna_tx_res_req(bnad->num_txq_per_tx,
28967@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28968 struct bna_intr_info *intr_info =
28969 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28970 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28971- struct bna_rx_event_cbfn rx_cbfn;
28972+ static struct bna_rx_event_cbfn rx_cbfn = {
28973+ /* Initialize the Rx event handlers */
28974+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28975+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28976+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28977+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28978+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28979+ .rx_post_cbfn = bnad_cb_rx_post
28980+ };
28981 struct bna_rx *rx;
28982 unsigned long flags;
28983
28984 /* Initialize the Rx object configuration */
28985 bnad_init_rx_config(bnad, rx_config);
28986
28987- /* Initialize the Rx event handlers */
28988- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28989- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28990- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28991- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28992- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28993- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28994-
28995 /* Get BNA's resource requirement for one Rx object */
28996 spin_lock_irqsave(&bnad->bna_lock, flags);
28997 bna_rx_res_req(rx_config, res_info);
28998diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28999--- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
29000+++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
29001@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
29002 int rc = 0;
29003 u32 magic, csum;
29004
29005+ pax_track_stack();
29006+
29007 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
29008 goto test_nvram_done;
29009
29010diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
29011--- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29012+++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
29013@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
29014 int i, rc;
29015 u32 magic, crc;
29016
29017+ pax_track_stack();
29018+
29019 if (BP_NOMCP(bp))
29020 return 0;
29021
29022diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
29023--- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
29024+++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
29025@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
29026 */
29027 struct l2t_skb_cb {
29028 arp_failure_handler_func arp_failure_handler;
29029-};
29030+} __no_const;
29031
29032 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
29033
29034diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
29035--- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
29036+++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
29037@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
29038 unsigned int nchan = adap->params.nports;
29039 struct msix_entry entries[MAX_INGQ + 1];
29040
29041+ pax_track_stack();
29042+
29043 for (i = 0; i < ARRAY_SIZE(entries); ++i)
29044 entries[i].entry = i;
29045
29046diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
29047--- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
29048+++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
29049@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
29050 u8 vpd[VPD_LEN], csum;
29051 unsigned int vpdr_len, kw_offset, id_len;
29052
29053+ pax_track_stack();
29054+
29055 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
29056 if (ret < 0)
29057 return ret;
29058diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
29059--- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
29060+++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
29061@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
29062 {
29063 struct e1000_hw *hw = &adapter->hw;
29064 struct e1000_mac_info *mac = &hw->mac;
29065- struct e1000_mac_operations *func = &mac->ops;
29066+ e1000_mac_operations_no_const *func = &mac->ops;
29067 u32 swsm = 0;
29068 u32 swsm2 = 0;
29069 bool force_clear_smbi = false;
29070diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
29071--- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
29072+++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
29073@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
29074 {
29075 struct e1000_hw *hw = &adapter->hw;
29076 struct e1000_mac_info *mac = &hw->mac;
29077- struct e1000_mac_operations *func = &mac->ops;
29078+ e1000_mac_operations_no_const *func = &mac->ops;
29079
29080 /* Set media type */
29081 switch (adapter->pdev->device) {
29082diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
29083--- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
29084+++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
29085@@ -776,6 +776,7 @@ struct e1000_mac_operations {
29086 void (*write_vfta)(struct e1000_hw *, u32, u32);
29087 s32 (*read_mac_addr)(struct e1000_hw *);
29088 };
29089+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29090
29091 /* Function pointers for the PHY. */
29092 struct e1000_phy_operations {
29093@@ -799,6 +800,7 @@ struct e1000_phy_operations {
29094 void (*power_up)(struct e1000_hw *);
29095 void (*power_down)(struct e1000_hw *);
29096 };
29097+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29098
29099 /* Function pointers for the NVM. */
29100 struct e1000_nvm_operations {
29101@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
29102 s32 (*validate)(struct e1000_hw *);
29103 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
29104 };
29105+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29106
29107 struct e1000_mac_info {
29108- struct e1000_mac_operations ops;
29109+ e1000_mac_operations_no_const ops;
29110 u8 addr[ETH_ALEN];
29111 u8 perm_addr[ETH_ALEN];
29112
29113@@ -853,7 +856,7 @@ struct e1000_mac_info {
29114 };
29115
29116 struct e1000_phy_info {
29117- struct e1000_phy_operations ops;
29118+ e1000_phy_operations_no_const ops;
29119
29120 enum e1000_phy_type type;
29121
29122@@ -887,7 +890,7 @@ struct e1000_phy_info {
29123 };
29124
29125 struct e1000_nvm_info {
29126- struct e1000_nvm_operations ops;
29127+ e1000_nvm_operations_no_const ops;
29128
29129 enum e1000_nvm_type type;
29130 enum e1000_nvm_override override;
29131diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
29132--- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
29133+++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
29134@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
29135 unsigned char buf[512];
29136 int count1;
29137
29138+ pax_track_stack();
29139+
29140 if (!count)
29141 return;
29142
29143diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
29144--- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
29145+++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
29146@@ -314,6 +314,7 @@ struct e1000_mac_operations {
29147 s32 (*read_mac_addr)(struct e1000_hw *);
29148 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
29149 };
29150+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29151
29152 struct e1000_phy_operations {
29153 s32 (*acquire)(struct e1000_hw *);
29154@@ -330,6 +331,7 @@ struct e1000_phy_operations {
29155 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
29156 s32 (*write_reg)(struct e1000_hw *, u32, u16);
29157 };
29158+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29159
29160 struct e1000_nvm_operations {
29161 s32 (*acquire)(struct e1000_hw *);
29162@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
29163 s32 (*update)(struct e1000_hw *);
29164 s32 (*validate)(struct e1000_hw *);
29165 };
29166+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29167
29168 struct e1000_info {
29169 s32 (*get_invariants)(struct e1000_hw *);
29170@@ -350,7 +353,7 @@ struct e1000_info {
29171 extern const struct e1000_info e1000_82575_info;
29172
29173 struct e1000_mac_info {
29174- struct e1000_mac_operations ops;
29175+ e1000_mac_operations_no_const ops;
29176
29177 u8 addr[6];
29178 u8 perm_addr[6];
29179@@ -388,7 +391,7 @@ struct e1000_mac_info {
29180 };
29181
29182 struct e1000_phy_info {
29183- struct e1000_phy_operations ops;
29184+ e1000_phy_operations_no_const ops;
29185
29186 enum e1000_phy_type type;
29187
29188@@ -423,7 +426,7 @@ struct e1000_phy_info {
29189 };
29190
29191 struct e1000_nvm_info {
29192- struct e1000_nvm_operations ops;
29193+ e1000_nvm_operations_no_const ops;
29194 enum e1000_nvm_type type;
29195 enum e1000_nvm_override override;
29196
29197@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
29198 s32 (*check_for_ack)(struct e1000_hw *, u16);
29199 s32 (*check_for_rst)(struct e1000_hw *, u16);
29200 };
29201+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29202
29203 struct e1000_mbx_stats {
29204 u32 msgs_tx;
29205@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
29206 };
29207
29208 struct e1000_mbx_info {
29209- struct e1000_mbx_operations ops;
29210+ e1000_mbx_operations_no_const ops;
29211 struct e1000_mbx_stats stats;
29212 u32 timeout;
29213 u32 usec_delay;
29214diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
29215--- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
29216+++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
29217@@ -189,9 +189,10 @@ struct e1000_mac_operations {
29218 s32 (*read_mac_addr)(struct e1000_hw *);
29219 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
29220 };
29221+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29222
29223 struct e1000_mac_info {
29224- struct e1000_mac_operations ops;
29225+ e1000_mac_operations_no_const ops;
29226 u8 addr[6];
29227 u8 perm_addr[6];
29228
29229@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
29230 s32 (*check_for_ack)(struct e1000_hw *);
29231 s32 (*check_for_rst)(struct e1000_hw *);
29232 };
29233+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29234
29235 struct e1000_mbx_stats {
29236 u32 msgs_tx;
29237@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
29238 };
29239
29240 struct e1000_mbx_info {
29241- struct e1000_mbx_operations ops;
29242+ e1000_mbx_operations_no_const ops;
29243 struct e1000_mbx_stats stats;
29244 u32 timeout;
29245 u32 usec_delay;
29246diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
29247--- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
29248+++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
29249@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
29250 u32 rctl;
29251 int i;
29252
29253+ pax_track_stack();
29254+
29255 /* Check for Promiscuous and All Multicast modes */
29256
29257 rctl = IXGB_READ_REG(hw, RCTL);
29258diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
29259--- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
29260+++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
29261@@ -261,6 +261,9 @@ void __devinit
29262 ixgb_check_options(struct ixgb_adapter *adapter)
29263 {
29264 int bd = adapter->bd_number;
29265+
29266+ pax_track_stack();
29267+
29268 if (bd >= IXGB_MAX_NIC) {
29269 pr_notice("Warning: no configuration for board #%i\n", bd);
29270 pr_notice("Using defaults for all values\n");
29271diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
29272--- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
29273+++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
29274@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
29275 s32 (*update_checksum)(struct ixgbe_hw *);
29276 u16 (*calc_checksum)(struct ixgbe_hw *);
29277 };
29278+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
29279
29280 struct ixgbe_mac_operations {
29281 s32 (*init_hw)(struct ixgbe_hw *);
29282@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
29283 /* Flow Control */
29284 s32 (*fc_enable)(struct ixgbe_hw *, s32);
29285 };
29286+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
29287
29288 struct ixgbe_phy_operations {
29289 s32 (*identify)(struct ixgbe_hw *);
29290@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
29291 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
29292 s32 (*check_overtemp)(struct ixgbe_hw *);
29293 };
29294+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
29295
29296 struct ixgbe_eeprom_info {
29297- struct ixgbe_eeprom_operations ops;
29298+ ixgbe_eeprom_operations_no_const ops;
29299 enum ixgbe_eeprom_type type;
29300 u32 semaphore_delay;
29301 u16 word_size;
29302@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
29303
29304 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
29305 struct ixgbe_mac_info {
29306- struct ixgbe_mac_operations ops;
29307+ ixgbe_mac_operations_no_const ops;
29308 enum ixgbe_mac_type type;
29309 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
29310 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
29311@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
29312 };
29313
29314 struct ixgbe_phy_info {
29315- struct ixgbe_phy_operations ops;
29316+ ixgbe_phy_operations_no_const ops;
29317 struct mdio_if_info mdio;
29318 enum ixgbe_phy_type type;
29319 u32 id;
29320@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
29321 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
29322 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
29323 };
29324+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
29325
29326 struct ixgbe_mbx_stats {
29327 u32 msgs_tx;
29328@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
29329 };
29330
29331 struct ixgbe_mbx_info {
29332- struct ixgbe_mbx_operations ops;
29333+ ixgbe_mbx_operations_no_const ops;
29334 struct ixgbe_mbx_stats stats;
29335 u32 timeout;
29336 u32 usec_delay;
29337diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
29338--- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
29339+++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
29340@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
29341 s32 (*clear_vfta)(struct ixgbe_hw *);
29342 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
29343 };
29344+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
29345
29346 enum ixgbe_mac_type {
29347 ixgbe_mac_unknown = 0,
29348@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
29349 };
29350
29351 struct ixgbe_mac_info {
29352- struct ixgbe_mac_operations ops;
29353+ ixgbe_mac_operations_no_const ops;
29354 u8 addr[6];
29355 u8 perm_addr[6];
29356
29357@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
29358 s32 (*check_for_ack)(struct ixgbe_hw *);
29359 s32 (*check_for_rst)(struct ixgbe_hw *);
29360 };
29361+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
29362
29363 struct ixgbe_mbx_stats {
29364 u32 msgs_tx;
29365@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
29366 };
29367
29368 struct ixgbe_mbx_info {
29369- struct ixgbe_mbx_operations ops;
29370+ ixgbe_mbx_operations_no_const ops;
29371 struct ixgbe_mbx_stats stats;
29372 u32 timeout;
29373 u32 udelay;
29374diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
29375--- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
29376+++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
29377@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
29378 int rc;
29379 u64 counter[TOTAL_PORT_COUNTER_NUM];
29380
29381+ pax_track_stack();
29382+
29383 mutex_lock(&hw_priv->lock);
29384 n = SWITCH_PORT_NUM;
29385 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
29386diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
29387--- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
29388+++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
29389@@ -40,6 +40,7 @@
29390 #include <linux/dma-mapping.h>
29391 #include <linux/slab.h>
29392 #include <linux/io-mapping.h>
29393+#include <linux/sched.h>
29394
29395 #include <linux/mlx4/device.h>
29396 #include <linux/mlx4/doorbell.h>
29397@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
29398 u64 icm_size;
29399 int err;
29400
29401+ pax_track_stack();
29402+
29403 err = mlx4_QUERY_FW(dev);
29404 if (err) {
29405 if (err == -EACCES)
29406diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
29407--- linux-3.0.4/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
29408+++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
29409@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
29410 int i, num_irqs, err;
29411 u8 first_ldg;
29412
29413+ pax_track_stack();
29414+
29415 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29416 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29417 ldg_num_map[i] = first_ldg + i;
29418diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
29419--- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
29420+++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
29421@@ -82,7 +82,7 @@ static int cards_found;
29422 /*
29423 * VLB I/O addresses
29424 */
29425-static unsigned int pcnet32_portlist[] __initdata =
29426+static unsigned int pcnet32_portlist[] __devinitdata =
29427 { 0x300, 0x320, 0x340, 0x360, 0 };
29428
29429 static int pcnet32_debug;
29430@@ -270,7 +270,7 @@ struct pcnet32_private {
29431 struct sk_buff **rx_skbuff;
29432 dma_addr_t *tx_dma_addr;
29433 dma_addr_t *rx_dma_addr;
29434- struct pcnet32_access a;
29435+ struct pcnet32_access *a;
29436 spinlock_t lock; /* Guard lock */
29437 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29438 unsigned int rx_ring_size; /* current rx ring size */
29439@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29440 u16 val;
29441
29442 netif_wake_queue(dev);
29443- val = lp->a.read_csr(ioaddr, CSR3);
29444+ val = lp->a->read_csr(ioaddr, CSR3);
29445 val &= 0x00ff;
29446- lp->a.write_csr(ioaddr, CSR3, val);
29447+ lp->a->write_csr(ioaddr, CSR3, val);
29448 napi_enable(&lp->napi);
29449 }
29450
29451@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29452 r = mii_link_ok(&lp->mii_if);
29453 } else if (lp->chip_version >= PCNET32_79C970A) {
29454 ulong ioaddr = dev->base_addr; /* card base I/O address */
29455- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29456+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29457 } else { /* can not detect link on really old chips */
29458 r = 1;
29459 }
29460@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29461 pcnet32_netif_stop(dev);
29462
29463 spin_lock_irqsave(&lp->lock, flags);
29464- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29465+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29466
29467 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29468
29469@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29470 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29471 {
29472 struct pcnet32_private *lp = netdev_priv(dev);
29473- struct pcnet32_access *a = &lp->a; /* access to registers */
29474+ struct pcnet32_access *a = lp->a; /* access to registers */
29475 ulong ioaddr = dev->base_addr; /* card base I/O address */
29476 struct sk_buff *skb; /* sk buff */
29477 int x, i; /* counters */
29478@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29479 pcnet32_netif_stop(dev);
29480
29481 spin_lock_irqsave(&lp->lock, flags);
29482- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29483+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29484
29485 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29486
29487 /* Reset the PCNET32 */
29488- lp->a.reset(ioaddr);
29489- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29490+ lp->a->reset(ioaddr);
29491+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29492
29493 /* switch pcnet32 to 32bit mode */
29494- lp->a.write_bcr(ioaddr, 20, 2);
29495+ lp->a->write_bcr(ioaddr, 20, 2);
29496
29497 /* purge & init rings but don't actually restart */
29498 pcnet32_restart(dev, 0x0000);
29499
29500- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29501+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29502
29503 /* Initialize Transmit buffers. */
29504 size = data_len + 15;
29505@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29506
29507 /* set int loopback in CSR15 */
29508 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29509- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29510+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29511
29512 teststatus = cpu_to_le16(0x8000);
29513- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29514+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29515
29516 /* Check status of descriptors */
29517 for (x = 0; x < numbuffs; x++) {
29518@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29519 }
29520 }
29521
29522- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29523+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29524 wmb();
29525 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29526 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29527@@ -1015,7 +1015,7 @@ clean_up:
29528 pcnet32_restart(dev, CSR0_NORMAL);
29529 } else {
29530 pcnet32_purge_rx_ring(dev);
29531- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29532+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29533 }
29534 spin_unlock_irqrestore(&lp->lock, flags);
29535
29536@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
29537 enum ethtool_phys_id_state state)
29538 {
29539 struct pcnet32_private *lp = netdev_priv(dev);
29540- struct pcnet32_access *a = &lp->a;
29541+ struct pcnet32_access *a = lp->a;
29542 ulong ioaddr = dev->base_addr;
29543 unsigned long flags;
29544 int i;
29545@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
29546 {
29547 int csr5;
29548 struct pcnet32_private *lp = netdev_priv(dev);
29549- struct pcnet32_access *a = &lp->a;
29550+ struct pcnet32_access *a = lp->a;
29551 ulong ioaddr = dev->base_addr;
29552 int ticks;
29553
29554@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29555 spin_lock_irqsave(&lp->lock, flags);
29556 if (pcnet32_tx(dev)) {
29557 /* reset the chip to clear the error condition, then restart */
29558- lp->a.reset(ioaddr);
29559- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29560+ lp->a->reset(ioaddr);
29561+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29562 pcnet32_restart(dev, CSR0_START);
29563 netif_wake_queue(dev);
29564 }
29565@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29566 __napi_complete(napi);
29567
29568 /* clear interrupt masks */
29569- val = lp->a.read_csr(ioaddr, CSR3);
29570+ val = lp->a->read_csr(ioaddr, CSR3);
29571 val &= 0x00ff;
29572- lp->a.write_csr(ioaddr, CSR3, val);
29573+ lp->a->write_csr(ioaddr, CSR3, val);
29574
29575 /* Set interrupt enable. */
29576- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29577+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29578
29579 spin_unlock_irqrestore(&lp->lock, flags);
29580 }
29581@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29582 int i, csr0;
29583 u16 *buff = ptr;
29584 struct pcnet32_private *lp = netdev_priv(dev);
29585- struct pcnet32_access *a = &lp->a;
29586+ struct pcnet32_access *a = lp->a;
29587 ulong ioaddr = dev->base_addr;
29588 unsigned long flags;
29589
29590@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29591 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29592 if (lp->phymask & (1 << j)) {
29593 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29594- lp->a.write_bcr(ioaddr, 33,
29595+ lp->a->write_bcr(ioaddr, 33,
29596 (j << 5) | i);
29597- *buff++ = lp->a.read_bcr(ioaddr, 34);
29598+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29599 }
29600 }
29601 }
29602@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29603 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29604 lp->options |= PCNET32_PORT_FD;
29605
29606- lp->a = *a;
29607+ lp->a = a;
29608
29609 /* prior to register_netdev, dev->name is not yet correct */
29610 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29611@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29612 if (lp->mii) {
29613 /* lp->phycount and lp->phymask are set to 0 by memset above */
29614
29615- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29616+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29617 /* scan for PHYs */
29618 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29619 unsigned short id1, id2;
29620@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29621 pr_info("Found PHY %04x:%04x at address %d\n",
29622 id1, id2, i);
29623 }
29624- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29625+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29626 if (lp->phycount > 1)
29627 lp->options |= PCNET32_PORT_MII;
29628 }
29629@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29630 }
29631
29632 /* Reset the PCNET32 */
29633- lp->a.reset(ioaddr);
29634+ lp->a->reset(ioaddr);
29635
29636 /* switch pcnet32 to 32bit mode */
29637- lp->a.write_bcr(ioaddr, 20, 2);
29638+ lp->a->write_bcr(ioaddr, 20, 2);
29639
29640 netif_printk(lp, ifup, KERN_DEBUG, dev,
29641 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29642@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29643 (u32) (lp->init_dma_addr));
29644
29645 /* set/reset autoselect bit */
29646- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29647+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29648 if (lp->options & PCNET32_PORT_ASEL)
29649 val |= 2;
29650- lp->a.write_bcr(ioaddr, 2, val);
29651+ lp->a->write_bcr(ioaddr, 2, val);
29652
29653 /* handle full duplex setting */
29654 if (lp->mii_if.full_duplex) {
29655- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29656+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29657 if (lp->options & PCNET32_PORT_FD) {
29658 val |= 1;
29659 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29660@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29661 if (lp->chip_version == 0x2627)
29662 val |= 3;
29663 }
29664- lp->a.write_bcr(ioaddr, 9, val);
29665+ lp->a->write_bcr(ioaddr, 9, val);
29666 }
29667
29668 /* set/reset GPSI bit in test register */
29669- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29670+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29671 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29672 val |= 0x10;
29673- lp->a.write_csr(ioaddr, 124, val);
29674+ lp->a->write_csr(ioaddr, 124, val);
29675
29676 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29677 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29678@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29679 * duplex, and/or enable auto negotiation, and clear DANAS
29680 */
29681 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29682- lp->a.write_bcr(ioaddr, 32,
29683- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29684+ lp->a->write_bcr(ioaddr, 32,
29685+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29686 /* disable Auto Negotiation, set 10Mpbs, HD */
29687- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29688+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29689 if (lp->options & PCNET32_PORT_FD)
29690 val |= 0x10;
29691 if (lp->options & PCNET32_PORT_100)
29692 val |= 0x08;
29693- lp->a.write_bcr(ioaddr, 32, val);
29694+ lp->a->write_bcr(ioaddr, 32, val);
29695 } else {
29696 if (lp->options & PCNET32_PORT_ASEL) {
29697- lp->a.write_bcr(ioaddr, 32,
29698- lp->a.read_bcr(ioaddr,
29699+ lp->a->write_bcr(ioaddr, 32,
29700+ lp->a->read_bcr(ioaddr,
29701 32) | 0x0080);
29702 /* enable auto negotiate, setup, disable fd */
29703- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29704+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29705 val |= 0x20;
29706- lp->a.write_bcr(ioaddr, 32, val);
29707+ lp->a->write_bcr(ioaddr, 32, val);
29708 }
29709 }
29710 } else {
29711@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29712 * There is really no good other way to handle multiple PHYs
29713 * other than turning off all automatics
29714 */
29715- val = lp->a.read_bcr(ioaddr, 2);
29716- lp->a.write_bcr(ioaddr, 2, val & ~2);
29717- val = lp->a.read_bcr(ioaddr, 32);
29718- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29719+ val = lp->a->read_bcr(ioaddr, 2);
29720+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29721+ val = lp->a->read_bcr(ioaddr, 32);
29722+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29723
29724 if (!(lp->options & PCNET32_PORT_ASEL)) {
29725 /* setup ecmd */
29726@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29727 ethtool_cmd_speed_set(&ecmd,
29728 (lp->options & PCNET32_PORT_100) ?
29729 SPEED_100 : SPEED_10);
29730- bcr9 = lp->a.read_bcr(ioaddr, 9);
29731+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29732
29733 if (lp->options & PCNET32_PORT_FD) {
29734 ecmd.duplex = DUPLEX_FULL;
29735@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29736 ecmd.duplex = DUPLEX_HALF;
29737 bcr9 |= ~(1 << 0);
29738 }
29739- lp->a.write_bcr(ioaddr, 9, bcr9);
29740+ lp->a->write_bcr(ioaddr, 9, bcr9);
29741 }
29742
29743 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29744@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29745
29746 #ifdef DO_DXSUFLO
29747 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29748- val = lp->a.read_csr(ioaddr, CSR3);
29749+ val = lp->a->read_csr(ioaddr, CSR3);
29750 val |= 0x40;
29751- lp->a.write_csr(ioaddr, CSR3, val);
29752+ lp->a->write_csr(ioaddr, CSR3, val);
29753 }
29754 #endif
29755
29756@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29757 napi_enable(&lp->napi);
29758
29759 /* Re-initialize the PCNET32, and start it when done. */
29760- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29761- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29762+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29763+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29764
29765- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29766- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29767+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29768+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29769
29770 netif_start_queue(dev);
29771
29772@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29773
29774 i = 0;
29775 while (i++ < 100)
29776- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29777+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29778 break;
29779 /*
29780 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29781 * reports that doing so triggers a bug in the '974.
29782 */
29783- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29784+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29785
29786 netif_printk(lp, ifup, KERN_DEBUG, dev,
29787 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29788 i,
29789 (u32) (lp->init_dma_addr),
29790- lp->a.read_csr(ioaddr, CSR0));
29791+ lp->a->read_csr(ioaddr, CSR0));
29792
29793 spin_unlock_irqrestore(&lp->lock, flags);
29794
29795@@ -2218,7 +2218,7 @@ err_free_ring:
29796 * Switch back to 16bit mode to avoid problems with dumb
29797 * DOS packet driver after a warm reboot
29798 */
29799- lp->a.write_bcr(ioaddr, 20, 4);
29800+ lp->a->write_bcr(ioaddr, 20, 4);
29801
29802 err_free_irq:
29803 spin_unlock_irqrestore(&lp->lock, flags);
29804@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29805
29806 /* wait for stop */
29807 for (i = 0; i < 100; i++)
29808- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29809+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29810 break;
29811
29812 if (i >= 100)
29813@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29814 return;
29815
29816 /* ReInit Ring */
29817- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29818+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29819 i = 0;
29820 while (i++ < 1000)
29821- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29822+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29823 break;
29824
29825- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29826+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29827 }
29828
29829 static void pcnet32_tx_timeout(struct net_device *dev)
29830@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29831 /* Transmitter timeout, serious problems. */
29832 if (pcnet32_debug & NETIF_MSG_DRV)
29833 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29834- dev->name, lp->a.read_csr(ioaddr, CSR0));
29835- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29836+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29837+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29838 dev->stats.tx_errors++;
29839 if (netif_msg_tx_err(lp)) {
29840 int i;
29841@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29842
29843 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29844 "%s() called, csr0 %4.4x\n",
29845- __func__, lp->a.read_csr(ioaddr, CSR0));
29846+ __func__, lp->a->read_csr(ioaddr, CSR0));
29847
29848 /* Default status -- will not enable Successful-TxDone
29849 * interrupt when that option is available to us.
29850@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29851 dev->stats.tx_bytes += skb->len;
29852
29853 /* Trigger an immediate send poll. */
29854- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29855+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29856
29857 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29858 lp->tx_full = 1;
29859@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29860
29861 spin_lock(&lp->lock);
29862
29863- csr0 = lp->a.read_csr(ioaddr, CSR0);
29864+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29865 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29866 if (csr0 == 0xffff)
29867 break; /* PCMCIA remove happened */
29868 /* Acknowledge all of the current interrupt sources ASAP. */
29869- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29870+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29871
29872 netif_printk(lp, intr, KERN_DEBUG, dev,
29873 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29874- csr0, lp->a.read_csr(ioaddr, CSR0));
29875+ csr0, lp->a->read_csr(ioaddr, CSR0));
29876
29877 /* Log misc errors. */
29878 if (csr0 & 0x4000)
29879@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29880 if (napi_schedule_prep(&lp->napi)) {
29881 u16 val;
29882 /* set interrupt masks */
29883- val = lp->a.read_csr(ioaddr, CSR3);
29884+ val = lp->a->read_csr(ioaddr, CSR3);
29885 val |= 0x5f00;
29886- lp->a.write_csr(ioaddr, CSR3, val);
29887+ lp->a->write_csr(ioaddr, CSR3, val);
29888
29889 __napi_schedule(&lp->napi);
29890 break;
29891 }
29892- csr0 = lp->a.read_csr(ioaddr, CSR0);
29893+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29894 }
29895
29896 netif_printk(lp, intr, KERN_DEBUG, dev,
29897 "exiting interrupt, csr0=%#4.4x\n",
29898- lp->a.read_csr(ioaddr, CSR0));
29899+ lp->a->read_csr(ioaddr, CSR0));
29900
29901 spin_unlock(&lp->lock);
29902
29903@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29904
29905 spin_lock_irqsave(&lp->lock, flags);
29906
29907- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29908+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29909
29910 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29911 "Shutting down ethercard, status was %2.2x\n",
29912- lp->a.read_csr(ioaddr, CSR0));
29913+ lp->a->read_csr(ioaddr, CSR0));
29914
29915 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29916- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29917+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29918
29919 /*
29920 * Switch back to 16bit mode to avoid problems with dumb
29921 * DOS packet driver after a warm reboot
29922 */
29923- lp->a.write_bcr(ioaddr, 20, 4);
29924+ lp->a->write_bcr(ioaddr, 20, 4);
29925
29926 spin_unlock_irqrestore(&lp->lock, flags);
29927
29928@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29929 unsigned long flags;
29930
29931 spin_lock_irqsave(&lp->lock, flags);
29932- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29933+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29934 spin_unlock_irqrestore(&lp->lock, flags);
29935
29936 return &dev->stats;
29937@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29938 if (dev->flags & IFF_ALLMULTI) {
29939 ib->filter[0] = cpu_to_le32(~0U);
29940 ib->filter[1] = cpu_to_le32(~0U);
29941- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29942- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29943- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29944- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29945+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29946+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29947+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29948+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29949 return;
29950 }
29951 /* clear the multicast filter */
29952@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29953 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29954 }
29955 for (i = 0; i < 4; i++)
29956- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29957+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29958 le16_to_cpu(mcast_table[i]));
29959 }
29960
29961@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29962
29963 spin_lock_irqsave(&lp->lock, flags);
29964 suspended = pcnet32_suspend(dev, &flags, 0);
29965- csr15 = lp->a.read_csr(ioaddr, CSR15);
29966+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29967 if (dev->flags & IFF_PROMISC) {
29968 /* Log any net taps. */
29969 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29970 lp->init_block->mode =
29971 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29972 7);
29973- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29974+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29975 } else {
29976 lp->init_block->mode =
29977 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29978- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29979+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29980 pcnet32_load_multicast(dev);
29981 }
29982
29983 if (suspended) {
29984 int csr5;
29985 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29986- csr5 = lp->a.read_csr(ioaddr, CSR5);
29987- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29988+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29989+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29990 } else {
29991- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29992+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29993 pcnet32_restart(dev, CSR0_NORMAL);
29994 netif_wake_queue(dev);
29995 }
29996@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29997 if (!lp->mii)
29998 return 0;
29999
30000- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30001- val_out = lp->a.read_bcr(ioaddr, 34);
30002+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30003+ val_out = lp->a->read_bcr(ioaddr, 34);
30004
30005 return val_out;
30006 }
30007@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
30008 if (!lp->mii)
30009 return;
30010
30011- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30012- lp->a.write_bcr(ioaddr, 34, val);
30013+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30014+ lp->a->write_bcr(ioaddr, 34, val);
30015 }
30016
30017 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
30018@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
30019 curr_link = mii_link_ok(&lp->mii_if);
30020 } else {
30021 ulong ioaddr = dev->base_addr; /* card base I/O address */
30022- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
30023+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
30024 }
30025 if (!curr_link) {
30026 if (prev_link || verbose) {
30027@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
30028 (ecmd.duplex == DUPLEX_FULL)
30029 ? "full" : "half");
30030 }
30031- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
30032+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
30033 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
30034 if (lp->mii_if.full_duplex)
30035 bcr9 |= (1 << 0);
30036 else
30037 bcr9 &= ~(1 << 0);
30038- lp->a.write_bcr(dev->base_addr, 9, bcr9);
30039+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
30040 }
30041 } else {
30042 netif_info(lp, link, dev, "link up\n");
30043diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
30044--- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
30045+++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
30046@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
30047 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
30048 struct ppp_stats stats;
30049 struct ppp_comp_stats cstats;
30050- char *vers;
30051
30052 switch (cmd) {
30053 case SIOCGPPPSTATS:
30054@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
30055 break;
30056
30057 case SIOCGPPPVER:
30058- vers = PPP_VERSION;
30059- if (copy_to_user(addr, vers, strlen(vers) + 1))
30060+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
30061 break;
30062 err = 0;
30063 break;
30064diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
30065--- linux-3.0.4/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
30066+++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
30067@@ -645,12 +645,12 @@ struct rtl8169_private {
30068 struct mdio_ops {
30069 void (*write)(void __iomem *, int, int);
30070 int (*read)(void __iomem *, int);
30071- } mdio_ops;
30072+ } __no_const mdio_ops;
30073
30074 struct pll_power_ops {
30075 void (*down)(struct rtl8169_private *);
30076 void (*up)(struct rtl8169_private *);
30077- } pll_power_ops;
30078+ } __no_const pll_power_ops;
30079
30080 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
30081 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
30082diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
30083--- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
30084+++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
30085@@ -134,6 +134,7 @@
30086 #define CHIPREV_ID_5750_A0 0x4000
30087 #define CHIPREV_ID_5750_A1 0x4001
30088 #define CHIPREV_ID_5750_A3 0x4003
30089+#define CHIPREV_ID_5750_C1 0x4201
30090 #define CHIPREV_ID_5750_C2 0x4202
30091 #define CHIPREV_ID_5752_A0_HW 0x5000
30092 #define CHIPREV_ID_5752_A0 0x6000
30093diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
30094--- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
30095+++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
30096@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
30097
30098 static int __init abyss_init (void)
30099 {
30100- abyss_netdev_ops = tms380tr_netdev_ops;
30101+ pax_open_kernel();
30102+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30103
30104- abyss_netdev_ops.ndo_open = abyss_open;
30105- abyss_netdev_ops.ndo_stop = abyss_close;
30106+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
30107+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
30108+ pax_close_kernel();
30109
30110 return pci_register_driver(&abyss_driver);
30111 }
30112diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
30113--- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
30114+++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
30115@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
30116
30117 static int __init madgemc_init (void)
30118 {
30119- madgemc_netdev_ops = tms380tr_netdev_ops;
30120- madgemc_netdev_ops.ndo_open = madgemc_open;
30121- madgemc_netdev_ops.ndo_stop = madgemc_close;
30122+ pax_open_kernel();
30123+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30124+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
30125+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
30126+ pax_close_kernel();
30127
30128 return mca_register_driver (&madgemc_driver);
30129 }
30130diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
30131--- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
30132+++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
30133@@ -353,9 +353,11 @@ static int __init proteon_init(void)
30134 struct platform_device *pdev;
30135 int i, num = 0, err = 0;
30136
30137- proteon_netdev_ops = tms380tr_netdev_ops;
30138- proteon_netdev_ops.ndo_open = proteon_open;
30139- proteon_netdev_ops.ndo_stop = tms380tr_close;
30140+ pax_open_kernel();
30141+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30142+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
30143+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
30144+ pax_close_kernel();
30145
30146 err = platform_driver_register(&proteon_driver);
30147 if (err)
30148diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
30149--- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
30150+++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
30151@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
30152 struct platform_device *pdev;
30153 int i, num = 0, err = 0;
30154
30155- sk_isa_netdev_ops = tms380tr_netdev_ops;
30156- sk_isa_netdev_ops.ndo_open = sk_isa_open;
30157- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30158+ pax_open_kernel();
30159+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30160+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
30161+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30162+ pax_close_kernel();
30163
30164 err = platform_driver_register(&sk_isa_driver);
30165 if (err)
30166diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
30167--- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
30168+++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
30169@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
30170 struct de_srom_info_leaf *il;
30171 void *bufp;
30172
30173+ pax_track_stack();
30174+
30175 /* download entire eeprom */
30176 for (i = 0; i < DE_EEPROM_WORDS; i++)
30177 ((__le16 *)ee_data)[i] =
30178diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
30179--- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
30180+++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
30181@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
30182 for (i=0; i<ETH_ALEN; i++) {
30183 tmp.addr[i] = dev->dev_addr[i];
30184 }
30185- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30186+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30187 break;
30188
30189 case DE4X5_SET_HWADDR: /* Set the hardware address */
30190@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
30191 spin_lock_irqsave(&lp->lock, flags);
30192 memcpy(&statbuf, &lp->pktStats, ioc->len);
30193 spin_unlock_irqrestore(&lp->lock, flags);
30194- if (copy_to_user(ioc->data, &statbuf, ioc->len))
30195+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
30196 return -EFAULT;
30197 break;
30198 }
30199diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
30200--- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
30201+++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
30202@@ -71,7 +71,7 @@
30203 #include <asm/byteorder.h>
30204 #include <linux/serial_core.h>
30205 #include <linux/serial.h>
30206-
30207+#include <asm/local.h>
30208
30209 #define MOD_AUTHOR "Option Wireless"
30210 #define MOD_DESCRIPTION "USB High Speed Option driver"
30211@@ -257,7 +257,7 @@ struct hso_serial {
30212
30213 /* from usb_serial_port */
30214 struct tty_struct *tty;
30215- int open_count;
30216+ local_t open_count;
30217 spinlock_t serial_lock;
30218
30219 int (*write_data) (struct hso_serial *serial);
30220@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
30221 struct urb *urb;
30222
30223 urb = serial->rx_urb[0];
30224- if (serial->open_count > 0) {
30225+ if (local_read(&serial->open_count) > 0) {
30226 count = put_rxbuf_data(urb, serial);
30227 if (count == -1)
30228 return;
30229@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
30230 DUMP1(urb->transfer_buffer, urb->actual_length);
30231
30232 /* Anyone listening? */
30233- if (serial->open_count == 0)
30234+ if (local_read(&serial->open_count) == 0)
30235 return;
30236
30237 if (status == 0) {
30238@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
30239 spin_unlock_irq(&serial->serial_lock);
30240
30241 /* check for port already opened, if not set the termios */
30242- serial->open_count++;
30243- if (serial->open_count == 1) {
30244+ if (local_inc_return(&serial->open_count) == 1) {
30245 serial->rx_state = RX_IDLE;
30246 /* Force default termio settings */
30247 _hso_serial_set_termios(tty, NULL);
30248@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
30249 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
30250 if (result) {
30251 hso_stop_serial_device(serial->parent);
30252- serial->open_count--;
30253+ local_dec(&serial->open_count);
30254 kref_put(&serial->parent->ref, hso_serial_ref_free);
30255 }
30256 } else {
30257@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
30258
30259 /* reset the rts and dtr */
30260 /* do the actual close */
30261- serial->open_count--;
30262+ local_dec(&serial->open_count);
30263
30264- if (serial->open_count <= 0) {
30265- serial->open_count = 0;
30266+ if (local_read(&serial->open_count) <= 0) {
30267+ local_set(&serial->open_count, 0);
30268 spin_lock_irq(&serial->serial_lock);
30269 if (serial->tty == tty) {
30270 serial->tty->driver_data = NULL;
30271@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
30272
30273 /* the actual setup */
30274 spin_lock_irqsave(&serial->serial_lock, flags);
30275- if (serial->open_count)
30276+ if (local_read(&serial->open_count))
30277 _hso_serial_set_termios(tty, old);
30278 else
30279 tty->termios = old;
30280@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
30281 D1("Pending read interrupt on port %d\n", i);
30282 spin_lock(&serial->serial_lock);
30283 if (serial->rx_state == RX_IDLE &&
30284- serial->open_count > 0) {
30285+ local_read(&serial->open_count) > 0) {
30286 /* Setup and send a ctrl req read on
30287 * port i */
30288 if (!serial->rx_urb_filled[0]) {
30289@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
30290 /* Start all serial ports */
30291 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
30292 if (serial_table[i] && (serial_table[i]->interface == iface)) {
30293- if (dev2ser(serial_table[i])->open_count) {
30294+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
30295 result =
30296 hso_start_serial_device(serial_table[i], GFP_NOIO);
30297 hso_kick_transmit(dev2ser(serial_table[i]));
30298diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
30299--- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30300+++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
30301@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
30302 * Return with error code if any of the queue indices
30303 * is out of range
30304 */
30305- if (p->ring_index[i] < 0 ||
30306- p->ring_index[i] >= adapter->num_rx_queues)
30307+ if (p->ring_index[i] >= adapter->num_rx_queues)
30308 return -EINVAL;
30309 }
30310
30311diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
30312--- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
30313+++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
30314@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
30315 void (*link_down)(struct __vxge_hw_device *devh);
30316 void (*crit_err)(struct __vxge_hw_device *devh,
30317 enum vxge_hw_event type, u64 ext_data);
30318-};
30319+} __no_const;
30320
30321 /*
30322 * struct __vxge_hw_blockpool_entry - Block private data structure
30323diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
30324--- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
30325+++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
30326@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
30327 struct sk_buff *completed[NR_SKB_COMPLETED];
30328 int more;
30329
30330+ pax_track_stack();
30331+
30332 do {
30333 more = 0;
30334 skb_ptr = completed;
30335@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
30336 u8 mtable[256] = {0}; /* CPU to vpath mapping */
30337 int index;
30338
30339+ pax_track_stack();
30340+
30341 /*
30342 * Filling
30343 * - itable with bucket numbers
30344diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
30345--- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
30346+++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
30347@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
30348 struct vxge_hw_mempool_dma *dma_object,
30349 u32 index,
30350 u32 is_last);
30351-};
30352+} __no_const;
30353
30354 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
30355 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
30356diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
30357--- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
30358+++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
30359@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
30360 unsigned char hex[1024],
30361 * phex = hex;
30362
30363+ pax_track_stack();
30364+
30365 if (len >= (sizeof(hex) / 2))
30366 len = (sizeof(hex) / 2) - 1;
30367
30368diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
30369--- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
30370+++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
30371@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
30372
30373 static int x25_open(struct net_device *dev)
30374 {
30375- struct lapb_register_struct cb;
30376+ static struct lapb_register_struct cb = {
30377+ .connect_confirmation = x25_connected,
30378+ .connect_indication = x25_connected,
30379+ .disconnect_confirmation = x25_disconnected,
30380+ .disconnect_indication = x25_disconnected,
30381+ .data_indication = x25_data_indication,
30382+ .data_transmit = x25_data_transmit
30383+ };
30384 int result;
30385
30386- cb.connect_confirmation = x25_connected;
30387- cb.connect_indication = x25_connected;
30388- cb.disconnect_confirmation = x25_disconnected;
30389- cb.disconnect_indication = x25_disconnected;
30390- cb.data_indication = x25_data_indication;
30391- cb.data_transmit = x25_data_transmit;
30392-
30393 result = lapb_register(dev, &cb);
30394 if (result != LAPB_OK)
30395 return result;
30396diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
30397--- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
30398+++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
30399@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
30400 int do_autopm = 1;
30401 DECLARE_COMPLETION_ONSTACK(notif_completion);
30402
30403+ pax_track_stack();
30404+
30405 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30406 i2400m, ack, ack_size);
30407 BUG_ON(_ack == i2400m->bm_ack_buf);
30408diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
30409--- linux-3.0.4/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
30410+++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
30411@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
30412 BSSListElement * loop_net;
30413 BSSListElement * tmp_net;
30414
30415+ pax_track_stack();
30416+
30417 /* Blow away current list of scan results */
30418 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30419 list_move_tail (&loop_net->list, &ai->network_free_list);
30420@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
30421 WepKeyRid wkr;
30422 int rc;
30423
30424+ pax_track_stack();
30425+
30426 memset( &mySsid, 0, sizeof( mySsid ) );
30427 kfree (ai->flash);
30428 ai->flash = NULL;
30429@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
30430 __le32 *vals = stats.vals;
30431 int len;
30432
30433+ pax_track_stack();
30434+
30435 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30436 return -ENOMEM;
30437 data = file->private_data;
30438@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
30439 /* If doLoseSync is not 1, we won't do a Lose Sync */
30440 int doLoseSync = -1;
30441
30442+ pax_track_stack();
30443+
30444 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30445 return -ENOMEM;
30446 data = file->private_data;
30447@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
30448 int i;
30449 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30450
30451+ pax_track_stack();
30452+
30453 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30454 if (!qual)
30455 return -ENOMEM;
30456@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
30457 CapabilityRid cap_rid;
30458 __le32 *vals = stats_rid.vals;
30459
30460+ pax_track_stack();
30461+
30462 /* Get stats out of the card */
30463 clear_bit(JOB_WSTATS, &local->jobs);
30464 if (local->power.event) {
30465diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
30466--- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
30467+++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
30468@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30469 unsigned int v;
30470 u64 tsf;
30471
30472+ pax_track_stack();
30473+
30474 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30475 len += snprintf(buf+len, sizeof(buf)-len,
30476 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30477@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30478 unsigned int len = 0;
30479 unsigned int i;
30480
30481+ pax_track_stack();
30482+
30483 len += snprintf(buf+len, sizeof(buf)-len,
30484 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30485
30486@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30487 unsigned int i;
30488 unsigned int v;
30489
30490+ pax_track_stack();
30491+
30492 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30493 sc->ah->ah_ant_mode);
30494 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30495@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30496 unsigned int len = 0;
30497 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30498
30499+ pax_track_stack();
30500+
30501 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30502 sc->bssidmask);
30503 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30504@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30505 unsigned int len = 0;
30506 int i;
30507
30508+ pax_track_stack();
30509+
30510 len += snprintf(buf+len, sizeof(buf)-len,
30511 "RX\n---------------------\n");
30512 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30513@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30514 char buf[700];
30515 unsigned int len = 0;
30516
30517+ pax_track_stack();
30518+
30519 len += snprintf(buf+len, sizeof(buf)-len,
30520 "HW has PHY error counters:\t%s\n",
30521 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30522@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30523 struct ath5k_buf *bf, *bf0;
30524 int i, n;
30525
30526+ pax_track_stack();
30527+
30528 len += snprintf(buf+len, sizeof(buf)-len,
30529 "available txbuffers: %d\n", sc->txbuf_len);
30530
30531diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30532--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
30533+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
30534@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30535 int i, im, j;
30536 int nmeasurement;
30537
30538+ pax_track_stack();
30539+
30540 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30541 if (ah->txchainmask & (1 << i))
30542 num_chains++;
30543diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30544--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
30545+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
30546@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30547 int theta_low_bin = 0;
30548 int i;
30549
30550+ pax_track_stack();
30551+
30552 /* disregard any bin that contains <= 16 samples */
30553 thresh_accum_cnt = 16;
30554 scale_factor = 5;
30555diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
30556--- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30557+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30558@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30559 char buf[512];
30560 unsigned int len = 0;
30561
30562+ pax_track_stack();
30563+
30564 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30565 len += snprintf(buf + len, sizeof(buf) - len,
30566 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30567@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30568 u8 addr[ETH_ALEN];
30569 u32 tmp;
30570
30571+ pax_track_stack();
30572+
30573 len += snprintf(buf + len, sizeof(buf) - len,
30574 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30575 wiphy_name(sc->hw->wiphy),
30576diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30577--- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30578+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30579@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30580 unsigned int len = 0;
30581 int ret = 0;
30582
30583+ pax_track_stack();
30584+
30585 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30586
30587 ath9k_htc_ps_wakeup(priv);
30588@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30589 unsigned int len = 0;
30590 int ret = 0;
30591
30592+ pax_track_stack();
30593+
30594 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30595
30596 ath9k_htc_ps_wakeup(priv);
30597@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30598 unsigned int len = 0;
30599 int ret = 0;
30600
30601+ pax_track_stack();
30602+
30603 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30604
30605 ath9k_htc_ps_wakeup(priv);
30606@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30607 char buf[512];
30608 unsigned int len = 0;
30609
30610+ pax_track_stack();
30611+
30612 len += snprintf(buf + len, sizeof(buf) - len,
30613 "%20s : %10u\n", "Buffers queued",
30614 priv->debug.tx_stats.buf_queued);
30615@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30616 char buf[512];
30617 unsigned int len = 0;
30618
30619+ pax_track_stack();
30620+
30621 spin_lock_bh(&priv->tx.tx_lock);
30622
30623 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30624@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30625 char buf[512];
30626 unsigned int len = 0;
30627
30628+ pax_track_stack();
30629+
30630 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30631 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30632
30633diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30634--- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
30635+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30636@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30637
30638 /* ANI */
30639 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30640-};
30641+} __no_const;
30642
30643 /**
30644 * struct ath_hw_ops - callbacks used by hardware code and driver code
30645@@ -637,7 +637,7 @@ struct ath_hw_ops {
30646 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30647 struct ath_hw_antcomb_conf *antconf);
30648
30649-};
30650+} __no_const;
30651
30652 struct ath_nf_limits {
30653 s16 max;
30654@@ -650,7 +650,7 @@ struct ath_nf_limits {
30655 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30656
30657 struct ath_hw {
30658- struct ath_ops reg_ops;
30659+ ath_ops_no_const reg_ops;
30660
30661 struct ieee80211_hw *hw;
30662 struct ath_common common;
30663diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30664--- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30665+++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30666@@ -121,6 +121,7 @@ struct ath_ops {
30667 void (*write_flush) (void *);
30668 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30669 };
30670+typedef struct ath_ops __no_const ath_ops_no_const;
30671
30672 struct ath_common;
30673 struct ath_bus_ops;
30674diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30675--- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30676+++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30677@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30678 int err;
30679 DECLARE_SSID_BUF(ssid);
30680
30681+ pax_track_stack();
30682+
30683 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30684
30685 if (ssid_len)
30686@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30687 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30688 int err;
30689
30690+ pax_track_stack();
30691+
30692 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30693 idx, keylen, len);
30694
30695diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30696--- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30697+++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30698@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30699 unsigned long flags;
30700 DECLARE_SSID_BUF(ssid);
30701
30702+ pax_track_stack();
30703+
30704 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30705 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30706 print_ssid(ssid, info_element->data, info_element->len),
30707diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30708--- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30709+++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30710@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30711 */
30712 if (iwl3945_mod_params.disable_hw_scan) {
30713 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30714- iwl3945_hw_ops.hw_scan = NULL;
30715+ pax_open_kernel();
30716+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30717+ pax_close_kernel();
30718 }
30719
30720 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30721diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30722--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30723+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30724@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30725 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30726 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30727
30728+ pax_track_stack();
30729+
30730 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30731
30732 /* Treat uninitialized rate scaling data same as non-existing. */
30733@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30734 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30735 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30736
30737+ pax_track_stack();
30738+
30739 /* Override starting rate (index 0) if needed for debug purposes */
30740 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30741
30742diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30743--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30744+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30745@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30746 int pos = 0;
30747 const size_t bufsz = sizeof(buf);
30748
30749+ pax_track_stack();
30750+
30751 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30752 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30753 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30754@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30755 char buf[256 * NUM_IWL_RXON_CTX];
30756 const size_t bufsz = sizeof(buf);
30757
30758+ pax_track_stack();
30759+
30760 for_each_context(priv, ctx) {
30761 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30762 ctx->ctxid);
30763diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30764--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30765+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30766@@ -68,8 +68,8 @@ do {
30767 } while (0)
30768
30769 #else
30770-#define IWL_DEBUG(__priv, level, fmt, args...)
30771-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30772+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30773+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30774 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30775 const void *p, u32 len)
30776 {}
30777diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30778--- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30779+++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30780@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30781 int buf_len = 512;
30782 size_t len = 0;
30783
30784+ pax_track_stack();
30785+
30786 if (*ppos != 0)
30787 return 0;
30788 if (count < sizeof(buf))
30789diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30790--- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30791+++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30792@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30793 return -EINVAL;
30794
30795 if (fake_hw_scan) {
30796- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30797- mac80211_hwsim_ops.sw_scan_start = NULL;
30798- mac80211_hwsim_ops.sw_scan_complete = NULL;
30799+ pax_open_kernel();
30800+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30801+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30802+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30803+ pax_close_kernel();
30804 }
30805
30806 spin_lock_init(&hwsim_radio_lock);
30807diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30808--- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30809+++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30810@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30811
30812 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30813
30814- if (rts_threshold < 0 || rts_threshold > 2347)
30815+ if (rts_threshold > 2347)
30816 rts_threshold = 2347;
30817
30818 tmp = cpu_to_le32(rts_threshold);
30819diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30820--- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30821+++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30822@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30823 u8 rfpath;
30824 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30825
30826+ pax_track_stack();
30827+
30828 precommoncmdcnt = 0;
30829 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30830 MAX_PRECMD_CNT,
30831diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30832--- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30833+++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30834@@ -266,7 +266,7 @@ struct wl1251_if_operations {
30835 void (*reset)(struct wl1251 *wl);
30836 void (*enable_irq)(struct wl1251 *wl);
30837 void (*disable_irq)(struct wl1251 *wl);
30838-};
30839+} __no_const;
30840
30841 struct wl1251 {
30842 struct ieee80211_hw *hw;
30843diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30844--- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30845+++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30846@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30847 u32 chunk_len;
30848 int i;
30849
30850+ pax_track_stack();
30851+
30852 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30853
30854 spi_message_init(&m);
30855diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30856--- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30857+++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30858@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30859 if (cookie == NO_COOKIE)
30860 offset = pc;
30861 if (cookie == INVALID_COOKIE) {
30862- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30863+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30864 offset = pc;
30865 }
30866 if (cookie != last_cookie) {
30867@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30868 /* add userspace sample */
30869
30870 if (!mm) {
30871- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30872+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30873 return 0;
30874 }
30875
30876 cookie = lookup_dcookie(mm, s->eip, &offset);
30877
30878 if (cookie == INVALID_COOKIE) {
30879- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30880+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30881 return 0;
30882 }
30883
30884@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30885 /* ignore backtraces if failed to add a sample */
30886 if (state == sb_bt_start) {
30887 state = sb_bt_ignore;
30888- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30889+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30890 }
30891 }
30892 release_mm(mm);
30893diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30894--- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30895+++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30896@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30897 }
30898
30899 if (buffer_pos == buffer_size) {
30900- atomic_inc(&oprofile_stats.event_lost_overflow);
30901+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30902 return;
30903 }
30904
30905diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30906--- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30907+++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30908@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30909 if (oprofile_ops.switch_events())
30910 return;
30911
30912- atomic_inc(&oprofile_stats.multiplex_counter);
30913+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30914 start_switch_worker();
30915 }
30916
30917diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30918--- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30919+++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30920@@ -186,7 +186,7 @@ static const struct file_operations atom
30921
30922
30923 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30924- char const *name, atomic_t *val)
30925+ char const *name, atomic_unchecked_t *val)
30926 {
30927 return __oprofilefs_create_file(sb, root, name,
30928 &atomic_ro_fops, 0444, val);
30929diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30930--- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30931+++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30932@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30933 cpu_buf->sample_invalid_eip = 0;
30934 }
30935
30936- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30937- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30938- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30939- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30940- atomic_set(&oprofile_stats.multiplex_counter, 0);
30941+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30942+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30943+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30944+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30945+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30946 }
30947
30948
30949diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30950--- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30951+++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30952@@ -13,11 +13,11 @@
30953 #include <asm/atomic.h>
30954
30955 struct oprofile_stat_struct {
30956- atomic_t sample_lost_no_mm;
30957- atomic_t sample_lost_no_mapping;
30958- atomic_t bt_lost_no_mapping;
30959- atomic_t event_lost_overflow;
30960- atomic_t multiplex_counter;
30961+ atomic_unchecked_t sample_lost_no_mm;
30962+ atomic_unchecked_t sample_lost_no_mapping;
30963+ atomic_unchecked_t bt_lost_no_mapping;
30964+ atomic_unchecked_t event_lost_overflow;
30965+ atomic_unchecked_t multiplex_counter;
30966 };
30967
30968 extern struct oprofile_stat_struct oprofile_stats;
30969diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30970--- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30971+++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30972@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30973
30974 *ppos += len;
30975
30976- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30977+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30978 }
30979
30980 #ifdef CONFIG_PARPORT_1284
30981@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30982
30983 *ppos += len;
30984
30985- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30986+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30987 }
30988 #endif /* IEEE1284.3 support. */
30989
30990diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30991--- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30992+++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30993@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30994 int (*hardware_test) (struct slot* slot, u32 value);
30995 u8 (*get_power) (struct slot* slot);
30996 int (*set_power) (struct slot* slot, int value);
30997-};
30998+} __no_const;
30999
31000 struct cpci_hp_controller {
31001 unsigned int irq;
31002diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
31003--- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
31004+++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
31005@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
31006
31007 void compaq_nvram_init (void __iomem *rom_start)
31008 {
31009+
31010+#ifndef CONFIG_PAX_KERNEXEC
31011 if (rom_start) {
31012 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
31013 }
31014+#endif
31015+
31016 dbg("int15 entry = %p\n", compaq_int15_entry_point);
31017
31018 /* initialize our int15 lock */
31019diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
31020--- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
31021+++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
31022@@ -27,9 +27,9 @@
31023 #define MODULE_PARAM_PREFIX "pcie_aspm."
31024
31025 /* Note: those are not register definitions */
31026-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31027-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
31028-#define ASPM_STATE_L1 (4) /* L1 state */
31029+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
31030+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
31031+#define ASPM_STATE_L1 (4U) /* L1 state */
31032 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
31033 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
31034
31035diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
31036--- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
31037+++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
31038@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
31039 u32 l, sz, mask;
31040 u16 orig_cmd;
31041
31042- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
31043+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
31044
31045 if (!dev->mmio_always_on) {
31046 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
31047diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
31048--- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
31049+++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
31050@@ -476,7 +476,16 @@ static const struct file_operations proc
31051 static int __init pci_proc_init(void)
31052 {
31053 struct pci_dev *dev = NULL;
31054+
31055+#ifdef CONFIG_GRKERNSEC_PROC_ADD
31056+#ifdef CONFIG_GRKERNSEC_PROC_USER
31057+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
31058+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31059+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
31060+#endif
31061+#else
31062 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
31063+#endif
31064 proc_create("devices", 0, proc_bus_pci_dir,
31065 &proc_bus_pci_dev_operations);
31066 proc_initialized = 1;
31067diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
31068--- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
31069+++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
31070@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
31071 struct pcifront_sd *sd = bus->sysdata;
31072 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31073
31074+ pax_track_stack();
31075+
31076 if (verbose_request)
31077 dev_info(&pdev->xdev->dev,
31078 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
31079@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
31080 struct pcifront_sd *sd = bus->sysdata;
31081 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31082
31083+ pax_track_stack();
31084+
31085 if (verbose_request)
31086 dev_info(&pdev->xdev->dev,
31087 "write dev=%04x:%02x:%02x.%01x - "
31088@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
31089 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31090 struct msi_desc *entry;
31091
31092+ pax_track_stack();
31093+
31094 if (nvec > SH_INFO_MAX_VEC) {
31095 dev_err(&dev->dev, "too much vector for pci frontend: %x."
31096 " Increase SH_INFO_MAX_VEC.\n", nvec);
31097@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
31098 struct pcifront_sd *sd = dev->bus->sysdata;
31099 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31100
31101+ pax_track_stack();
31102+
31103 err = do_pci_op(pdev, &op);
31104
31105 /* What should do for error ? */
31106@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
31107 struct pcifront_sd *sd = dev->bus->sysdata;
31108 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31109
31110+ pax_track_stack();
31111+
31112 err = do_pci_op(pdev, &op);
31113 if (likely(!err)) {
31114 vector[0] = op.value;
31115diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
31116--- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
31117+++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
31118@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
31119 return 0;
31120 }
31121
31122-void static hotkey_mask_warn_incomplete_mask(void)
31123+static void hotkey_mask_warn_incomplete_mask(void)
31124 {
31125 /* log only what the user can fix... */
31126 const u32 wantedmask = hotkey_driver_mask &
31127diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
31128--- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
31129+++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
31130@@ -59,7 +59,7 @@ do { \
31131 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
31132 } while(0)
31133
31134-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
31135+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
31136 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
31137
31138 /*
31139@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
31140
31141 cpu = get_cpu();
31142 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
31143+
31144+ pax_open_kernel();
31145 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
31146+ pax_close_kernel();
31147
31148 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
31149 spin_lock_irqsave(&pnp_bios_lock, flags);
31150@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
31151 :"memory");
31152 spin_unlock_irqrestore(&pnp_bios_lock, flags);
31153
31154+ pax_open_kernel();
31155 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
31156+ pax_close_kernel();
31157+
31158 put_cpu();
31159
31160 /* If we get here and this is set then the PnP BIOS faulted on us. */
31161@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
31162 return status;
31163 }
31164
31165-void pnpbios_calls_init(union pnp_bios_install_struct *header)
31166+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
31167 {
31168 int i;
31169
31170@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
31171 pnp_bios_callpoint.offset = header->fields.pm16offset;
31172 pnp_bios_callpoint.segment = PNP_CS16;
31173
31174+ pax_open_kernel();
31175+
31176 for_each_possible_cpu(i) {
31177 struct desc_struct *gdt = get_cpu_gdt_table(i);
31178 if (!gdt)
31179@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
31180 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
31181 (unsigned long)__va(header->fields.pm16dseg));
31182 }
31183+
31184+ pax_close_kernel();
31185 }
31186diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
31187--- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
31188+++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
31189@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
31190 return 1;
31191
31192 /* check if the resource is valid */
31193- if (*irq < 0 || *irq > 15)
31194+ if (*irq > 15)
31195 return 0;
31196
31197 /* check if the resource is reserved */
31198@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
31199 return 1;
31200
31201 /* check if the resource is valid */
31202- if (*dma < 0 || *dma == 4 || *dma > 7)
31203+ if (*dma == 4 || *dma > 7)
31204 return 0;
31205
31206 /* check if the resource is reserved */
31207diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
31208--- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
31209+++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
31210@@ -67,7 +67,7 @@
31211 struct bq27x00_device_info;
31212 struct bq27x00_access_methods {
31213 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
31214-};
31215+} __no_const;
31216
31217 enum bq27x00_chip { BQ27000, BQ27500 };
31218
31219diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
31220--- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
31221+++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
31222@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
31223 max8660->shadow_regs[MAX8660_OVER1] = 5;
31224 } else {
31225 /* Otherwise devices can be toggled via software */
31226- max8660_dcdc_ops.enable = max8660_dcdc_enable;
31227- max8660_dcdc_ops.disable = max8660_dcdc_disable;
31228+ pax_open_kernel();
31229+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
31230+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
31231+ pax_close_kernel();
31232 }
31233
31234 /*
31235diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
31236--- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
31237+++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
31238@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
31239 }
31240 mc13xxx_unlock(mc13892);
31241
31242- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
31243+ pax_open_kernel();
31244+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
31245 = mc13892_vcam_set_mode;
31246- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
31247+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
31248 = mc13892_vcam_get_mode;
31249+ pax_close_kernel();
31250 for (i = 0; i < pdata->num_regulators; i++) {
31251 init_data = &pdata->regulators[i];
31252 priv->regulators[i] = regulator_register(
31253diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
31254--- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
31255+++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
31256@@ -14,6 +14,7 @@
31257 #include <linux/module.h>
31258 #include <linux/rtc.h>
31259 #include <linux/sched.h>
31260+#include <linux/grsecurity.h>
31261 #include "rtc-core.h"
31262
31263 static dev_t rtc_devt;
31264@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
31265 if (copy_from_user(&tm, uarg, sizeof(tm)))
31266 return -EFAULT;
31267
31268+ gr_log_timechange();
31269+
31270 return rtc_set_time(rtc, &tm);
31271
31272 case RTC_PIE_ON:
31273diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
31274--- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
31275+++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
31276@@ -492,7 +492,7 @@ struct adapter_ops
31277 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
31278 /* Administrative operations */
31279 int (*adapter_comm)(struct aac_dev * dev, int comm);
31280-};
31281+} __no_const;
31282
31283 /*
31284 * Define which interrupt handler needs to be installed
31285diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
31286--- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
31287+++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
31288@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
31289 u32 actual_fibsize64, actual_fibsize = 0;
31290 int i;
31291
31292+ pax_track_stack();
31293
31294 if (dev->in_reset) {
31295 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
31296diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
31297--- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
31298+++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
31299@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
31300 struct bfad_vport_s *vport, *vport_new;
31301 struct bfa_fcs_driver_info_s driver_info;
31302
31303+ pax_track_stack();
31304+
31305 /* Fill the driver_info info to fcs*/
31306 memset(&driver_info, 0, sizeof(driver_info));
31307 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
31308diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
31309--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
31310+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
31311@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
31312 u16 len, count;
31313 u16 templen;
31314
31315+ pax_track_stack();
31316+
31317 /*
31318 * get hba attributes
31319 */
31320@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
31321 u8 count = 0;
31322 u16 templen;
31323
31324+ pax_track_stack();
31325+
31326 /*
31327 * get port attributes
31328 */
31329diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
31330--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
31331+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
31332@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
31333 struct fc_rpsc_speed_info_s speeds;
31334 struct bfa_port_attr_s pport_attr;
31335
31336+ pax_track_stack();
31337+
31338 bfa_trc(port->fcs, rx_fchs->s_id);
31339 bfa_trc(port->fcs, rx_fchs->d_id);
31340
31341diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
31342--- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
31343+++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
31344@@ -238,7 +238,7 @@ struct bfa_hwif_s {
31345 u32 *nvecs, u32 *maxvec);
31346 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
31347 u32 *end);
31348-};
31349+} __no_const;
31350 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
31351
31352 struct bfa_iocfc_s {
31353diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
31354--- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
31355+++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
31356@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
31357 bfa_ioc_disable_cbfn_t disable_cbfn;
31358 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
31359 bfa_ioc_reset_cbfn_t reset_cbfn;
31360-};
31361+} __no_const;
31362
31363 /*
31364 * Heartbeat failure notification queue element.
31365@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
31366 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
31367 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
31368 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
31369-};
31370+} __no_const;
31371
31372 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
31373 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
31374diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
31375--- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
31376+++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
31377@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
31378 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
31379 *PrototypeHostAdapter)
31380 {
31381+ pax_track_stack();
31382+
31383 /*
31384 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
31385 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
31386diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
31387--- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
31388+++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
31389@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
31390 dma_addr_t addr;
31391 ulong flags = 0;
31392
31393+ pax_track_stack();
31394+
31395 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
31396 // get user msg size in u32s
31397 if(get_user(size, &user_msg[0])){
31398@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31399 s32 rcode;
31400 dma_addr_t addr;
31401
31402+ pax_track_stack();
31403+
31404 memset(msg, 0 , sizeof(msg));
31405 len = scsi_bufflen(cmd);
31406 direction = 0x00000000;
31407diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
31408--- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
31409+++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
31410@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31411 struct hostdata *ha;
31412 char name[16];
31413
31414+ pax_track_stack();
31415+
31416 sprintf(name, "%s%d", driver_name, j);
31417
31418 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31419diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
31420--- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
31421+++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
31422@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31423 } buf;
31424 int rc;
31425
31426+ pax_track_stack();
31427+
31428 fiph = (struct fip_header *)skb->data;
31429 sub = fiph->fip_subcode;
31430
31431diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
31432--- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
31433+++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
31434@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31435 unsigned long flags;
31436 gdth_ha_str *ha;
31437
31438+ pax_track_stack();
31439+
31440 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31441 return -EFAULT;
31442 ha = gdth_find_ha(ldrv.ionode);
31443@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31444 gdth_ha_str *ha;
31445 int rval;
31446
31447+ pax_track_stack();
31448+
31449 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31450 res.number >= MAX_HDRIVES)
31451 return -EFAULT;
31452@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31453 gdth_ha_str *ha;
31454 int rval;
31455
31456+ pax_track_stack();
31457+
31458 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31459 return -EFAULT;
31460 ha = gdth_find_ha(gen.ionode);
31461@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31462 int i;
31463 gdth_cmd_str gdtcmd;
31464 char cmnd[MAX_COMMAND_SIZE];
31465+
31466+ pax_track_stack();
31467+
31468 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31469
31470 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31471diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
31472--- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
31473+++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
31474@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31475 u64 paddr;
31476
31477 char cmnd[MAX_COMMAND_SIZE];
31478+
31479+ pax_track_stack();
31480+
31481 memset(cmnd, 0xff, 12);
31482 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31483
31484@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31485 gdth_hget_str *phg;
31486 char cmnd[MAX_COMMAND_SIZE];
31487
31488+ pax_track_stack();
31489+
31490 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31491 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31492 if (!gdtcmd || !estr)
31493diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
31494--- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
31495+++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
31496@@ -42,7 +42,7 @@
31497 #include "scsi_logging.h"
31498
31499
31500-static atomic_t scsi_host_next_hn; /* host_no for next new host */
31501+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31502
31503
31504 static void scsi_host_cls_release(struct device *dev)
31505@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31506 * subtract one because we increment first then return, but we need to
31507 * know what the next host number was before increment
31508 */
31509- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31510+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31511 shost->dma_channel = 0xff;
31512
31513 /* These three are default values which can be overridden */
31514diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
31515--- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
31516+++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
31517@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
31518 u32 a;
31519
31520 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31521- return h->access.command_completed(h);
31522+ return h->access->command_completed(h);
31523
31524 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31525 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31526@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
31527 while (!list_empty(&h->reqQ)) {
31528 c = list_entry(h->reqQ.next, struct CommandList, list);
31529 /* can't do anything if fifo is full */
31530- if ((h->access.fifo_full(h))) {
31531+ if ((h->access->fifo_full(h))) {
31532 dev_warn(&h->pdev->dev, "fifo full\n");
31533 break;
31534 }
31535@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
31536 h->Qdepth--;
31537
31538 /* Tell the controller execute command */
31539- h->access.submit_command(h, c);
31540+ h->access->submit_command(h, c);
31541
31542 /* Put job onto the completed Q */
31543 addQ(&h->cmpQ, c);
31544@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
31545
31546 static inline unsigned long get_next_completion(struct ctlr_info *h)
31547 {
31548- return h->access.command_completed(h);
31549+ return h->access->command_completed(h);
31550 }
31551
31552 static inline bool interrupt_pending(struct ctlr_info *h)
31553 {
31554- return h->access.intr_pending(h);
31555+ return h->access->intr_pending(h);
31556 }
31557
31558 static inline long interrupt_not_for_us(struct ctlr_info *h)
31559 {
31560- return (h->access.intr_pending(h) == 0) ||
31561+ return (h->access->intr_pending(h) == 0) ||
31562 (h->interrupts_enabled == 0);
31563 }
31564
31565@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31566 if (prod_index < 0)
31567 return -ENODEV;
31568 h->product_name = products[prod_index].product_name;
31569- h->access = *(products[prod_index].access);
31570+ h->access = products[prod_index].access;
31571
31572 if (hpsa_board_disabled(h->pdev)) {
31573 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31574@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31575 }
31576
31577 /* make sure the board interrupts are off */
31578- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31579+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31580
31581 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31582 goto clean2;
31583@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31584 * fake ones to scoop up any residual completions.
31585 */
31586 spin_lock_irqsave(&h->lock, flags);
31587- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31588+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31589 spin_unlock_irqrestore(&h->lock, flags);
31590 free_irq(h->intr[h->intr_mode], h);
31591 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31592@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31593 dev_info(&h->pdev->dev, "Board READY.\n");
31594 dev_info(&h->pdev->dev,
31595 "Waiting for stale completions to drain.\n");
31596- h->access.set_intr_mask(h, HPSA_INTR_ON);
31597+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31598 msleep(10000);
31599- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31600+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31601
31602 rc = controller_reset_failed(h->cfgtable);
31603 if (rc)
31604@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31605 }
31606
31607 /* Turn the interrupts on so we can service requests */
31608- h->access.set_intr_mask(h, HPSA_INTR_ON);
31609+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31610
31611 hpsa_hba_inquiry(h);
31612 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31613@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31614 * To write all data in the battery backed cache to disks
31615 */
31616 hpsa_flush_cache(h);
31617- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31618+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31619 free_irq(h->intr[h->intr_mode], h);
31620 #ifdef CONFIG_PCI_MSI
31621 if (h->msix_vector)
31622@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31623 return;
31624 }
31625 /* Change the access methods to the performant access methods */
31626- h->access = SA5_performant_access;
31627+ h->access = &SA5_performant_access;
31628 h->transMethod = CFGTBL_Trans_Performant;
31629 }
31630
31631diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31632--- linux-3.0.4/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
31633+++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31634@@ -73,7 +73,7 @@ struct ctlr_info {
31635 unsigned int msix_vector;
31636 unsigned int msi_vector;
31637 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31638- struct access_method access;
31639+ struct access_method *access;
31640
31641 /* queue and queue Info */
31642 struct list_head reqQ;
31643diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31644--- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31645+++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31646@@ -1027,7 +1027,7 @@ typedef struct {
31647 int (*intr)(struct ips_ha *);
31648 void (*enableint)(struct ips_ha *);
31649 uint32_t (*statupd)(struct ips_ha *);
31650-} ips_hw_func_t;
31651+} __no_const ips_hw_func_t;
31652
31653 typedef struct ips_ha {
31654 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31655diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31656--- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31657+++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31658@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31659 * all together if not used XXX
31660 */
31661 struct {
31662- atomic_t no_free_exch;
31663- atomic_t no_free_exch_xid;
31664- atomic_t xid_not_found;
31665- atomic_t xid_busy;
31666- atomic_t seq_not_found;
31667- atomic_t non_bls_resp;
31668+ atomic_unchecked_t no_free_exch;
31669+ atomic_unchecked_t no_free_exch_xid;
31670+ atomic_unchecked_t xid_not_found;
31671+ atomic_unchecked_t xid_busy;
31672+ atomic_unchecked_t seq_not_found;
31673+ atomic_unchecked_t non_bls_resp;
31674 } stats;
31675 };
31676
31677@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31678 /* allocate memory for exchange */
31679 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31680 if (!ep) {
31681- atomic_inc(&mp->stats.no_free_exch);
31682+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31683 goto out;
31684 }
31685 memset(ep, 0, sizeof(*ep));
31686@@ -761,7 +761,7 @@ out:
31687 return ep;
31688 err:
31689 spin_unlock_bh(&pool->lock);
31690- atomic_inc(&mp->stats.no_free_exch_xid);
31691+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31692 mempool_free(ep, mp->ep_pool);
31693 return NULL;
31694 }
31695@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31696 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31697 ep = fc_exch_find(mp, xid);
31698 if (!ep) {
31699- atomic_inc(&mp->stats.xid_not_found);
31700+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31701 reject = FC_RJT_OX_ID;
31702 goto out;
31703 }
31704@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31705 ep = fc_exch_find(mp, xid);
31706 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31707 if (ep) {
31708- atomic_inc(&mp->stats.xid_busy);
31709+ atomic_inc_unchecked(&mp->stats.xid_busy);
31710 reject = FC_RJT_RX_ID;
31711 goto rel;
31712 }
31713@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31714 }
31715 xid = ep->xid; /* get our XID */
31716 } else if (!ep) {
31717- atomic_inc(&mp->stats.xid_not_found);
31718+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31719 reject = FC_RJT_RX_ID; /* XID not found */
31720 goto out;
31721 }
31722@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31723 } else {
31724 sp = &ep->seq;
31725 if (sp->id != fh->fh_seq_id) {
31726- atomic_inc(&mp->stats.seq_not_found);
31727+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31728 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31729 goto rel;
31730 }
31731@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31732
31733 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31734 if (!ep) {
31735- atomic_inc(&mp->stats.xid_not_found);
31736+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31737 goto out;
31738 }
31739 if (ep->esb_stat & ESB_ST_COMPLETE) {
31740- atomic_inc(&mp->stats.xid_not_found);
31741+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31742 goto rel;
31743 }
31744 if (ep->rxid == FC_XID_UNKNOWN)
31745 ep->rxid = ntohs(fh->fh_rx_id);
31746 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31747- atomic_inc(&mp->stats.xid_not_found);
31748+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31749 goto rel;
31750 }
31751 if (ep->did != ntoh24(fh->fh_s_id) &&
31752 ep->did != FC_FID_FLOGI) {
31753- atomic_inc(&mp->stats.xid_not_found);
31754+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31755 goto rel;
31756 }
31757 sof = fr_sof(fp);
31758@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31759 sp->ssb_stat |= SSB_ST_RESP;
31760 sp->id = fh->fh_seq_id;
31761 } else if (sp->id != fh->fh_seq_id) {
31762- atomic_inc(&mp->stats.seq_not_found);
31763+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31764 goto rel;
31765 }
31766
31767@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31768 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31769
31770 if (!sp)
31771- atomic_inc(&mp->stats.xid_not_found);
31772+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31773 else
31774- atomic_inc(&mp->stats.non_bls_resp);
31775+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31776
31777 fc_frame_free(fp);
31778 }
31779diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31780--- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31781+++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31782@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31783 .postreset = ata_std_postreset,
31784 .error_handler = ata_std_error_handler,
31785 .post_internal_cmd = sas_ata_post_internal,
31786- .qc_defer = ata_std_qc_defer,
31787+ .qc_defer = ata_std_qc_defer,
31788 .qc_prep = ata_noop_qc_prep,
31789 .qc_issue = sas_ata_qc_issue,
31790 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31791diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31792--- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31793+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31794@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31795
31796 #include <linux/debugfs.h>
31797
31798-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31799+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31800 static unsigned long lpfc_debugfs_start_time = 0L;
31801
31802 /* iDiag */
31803@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31804 lpfc_debugfs_enable = 0;
31805
31806 len = 0;
31807- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31808+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31809 (lpfc_debugfs_max_disc_trc - 1);
31810 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31811 dtp = vport->disc_trc + i;
31812@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31813 lpfc_debugfs_enable = 0;
31814
31815 len = 0;
31816- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31817+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31818 (lpfc_debugfs_max_slow_ring_trc - 1);
31819 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31820 dtp = phba->slow_ring_trc + i;
31821@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31822 uint32_t *ptr;
31823 char buffer[1024];
31824
31825+ pax_track_stack();
31826+
31827 off = 0;
31828 spin_lock_irq(&phba->hbalock);
31829
31830@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31831 !vport || !vport->disc_trc)
31832 return;
31833
31834- index = atomic_inc_return(&vport->disc_trc_cnt) &
31835+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31836 (lpfc_debugfs_max_disc_trc - 1);
31837 dtp = vport->disc_trc + index;
31838 dtp->fmt = fmt;
31839 dtp->data1 = data1;
31840 dtp->data2 = data2;
31841 dtp->data3 = data3;
31842- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31843+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31844 dtp->jif = jiffies;
31845 #endif
31846 return;
31847@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31848 !phba || !phba->slow_ring_trc)
31849 return;
31850
31851- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31852+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31853 (lpfc_debugfs_max_slow_ring_trc - 1);
31854 dtp = phba->slow_ring_trc + index;
31855 dtp->fmt = fmt;
31856 dtp->data1 = data1;
31857 dtp->data2 = data2;
31858 dtp->data3 = data3;
31859- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31860+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31861 dtp->jif = jiffies;
31862 #endif
31863 return;
31864@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31865 "slow_ring buffer\n");
31866 goto debug_failed;
31867 }
31868- atomic_set(&phba->slow_ring_trc_cnt, 0);
31869+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31870 memset(phba->slow_ring_trc, 0,
31871 (sizeof(struct lpfc_debugfs_trc) *
31872 lpfc_debugfs_max_slow_ring_trc));
31873@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31874 "buffer\n");
31875 goto debug_failed;
31876 }
31877- atomic_set(&vport->disc_trc_cnt, 0);
31878+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31879
31880 snprintf(name, sizeof(name), "discovery_trace");
31881 vport->debug_disc_trc =
31882diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31883--- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31884+++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31885@@ -420,7 +420,7 @@ struct lpfc_vport {
31886 struct dentry *debug_nodelist;
31887 struct dentry *vport_debugfs_root;
31888 struct lpfc_debugfs_trc *disc_trc;
31889- atomic_t disc_trc_cnt;
31890+ atomic_unchecked_t disc_trc_cnt;
31891 #endif
31892 uint8_t stat_data_enabled;
31893 uint8_t stat_data_blocked;
31894@@ -826,8 +826,8 @@ struct lpfc_hba {
31895 struct timer_list fabric_block_timer;
31896 unsigned long bit_flags;
31897 #define FABRIC_COMANDS_BLOCKED 0
31898- atomic_t num_rsrc_err;
31899- atomic_t num_cmd_success;
31900+ atomic_unchecked_t num_rsrc_err;
31901+ atomic_unchecked_t num_cmd_success;
31902 unsigned long last_rsrc_error_time;
31903 unsigned long last_ramp_down_time;
31904 unsigned long last_ramp_up_time;
31905@@ -841,7 +841,7 @@ struct lpfc_hba {
31906 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31907 struct dentry *debug_slow_ring_trc;
31908 struct lpfc_debugfs_trc *slow_ring_trc;
31909- atomic_t slow_ring_trc_cnt;
31910+ atomic_unchecked_t slow_ring_trc_cnt;
31911 /* iDiag debugfs sub-directory */
31912 struct dentry *idiag_root;
31913 struct dentry *idiag_pci_cfg;
31914diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31915--- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31916+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31917@@ -9923,8 +9923,10 @@ lpfc_init(void)
31918 printk(LPFC_COPYRIGHT "\n");
31919
31920 if (lpfc_enable_npiv) {
31921- lpfc_transport_functions.vport_create = lpfc_vport_create;
31922- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31923+ pax_open_kernel();
31924+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31925+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31926+ pax_close_kernel();
31927 }
31928 lpfc_transport_template =
31929 fc_attach_transport(&lpfc_transport_functions);
31930diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31931--- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31932+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31933@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31934 uint32_t evt_posted;
31935
31936 spin_lock_irqsave(&phba->hbalock, flags);
31937- atomic_inc(&phba->num_rsrc_err);
31938+ atomic_inc_unchecked(&phba->num_rsrc_err);
31939 phba->last_rsrc_error_time = jiffies;
31940
31941 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31942@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31943 unsigned long flags;
31944 struct lpfc_hba *phba = vport->phba;
31945 uint32_t evt_posted;
31946- atomic_inc(&phba->num_cmd_success);
31947+ atomic_inc_unchecked(&phba->num_cmd_success);
31948
31949 if (vport->cfg_lun_queue_depth <= queue_depth)
31950 return;
31951@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31952 unsigned long num_rsrc_err, num_cmd_success;
31953 int i;
31954
31955- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31956- num_cmd_success = atomic_read(&phba->num_cmd_success);
31957+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31958+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31959
31960 vports = lpfc_create_vport_work_array(phba);
31961 if (vports != NULL)
31962@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31963 }
31964 }
31965 lpfc_destroy_vport_work_array(phba, vports);
31966- atomic_set(&phba->num_rsrc_err, 0);
31967- atomic_set(&phba->num_cmd_success, 0);
31968+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31969+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31970 }
31971
31972 /**
31973@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31974 }
31975 }
31976 lpfc_destroy_vport_work_array(phba, vports);
31977- atomic_set(&phba->num_rsrc_err, 0);
31978- atomic_set(&phba->num_cmd_success, 0);
31979+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31980+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31981 }
31982
31983 /**
31984diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31985--- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31986+++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31987@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31988 int rval;
31989 int i;
31990
31991+ pax_track_stack();
31992+
31993 // Allocate memory for the base list of scb for management module.
31994 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31995
31996diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31997--- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31998+++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31999@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
32000 int nelem = ARRAY_SIZE(get_attrs), a = 0;
32001 int ret;
32002
32003+ pax_track_stack();
32004+
32005 or = osd_start_request(od, GFP_KERNEL);
32006 if (!or)
32007 return -ENOMEM;
32008diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
32009--- linux-3.0.4/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
32010+++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
32011@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
32012 res->scsi_dev = scsi_dev;
32013 scsi_dev->hostdata = res;
32014 res->change_detected = 0;
32015- atomic_set(&res->read_failures, 0);
32016- atomic_set(&res->write_failures, 0);
32017+ atomic_set_unchecked(&res->read_failures, 0);
32018+ atomic_set_unchecked(&res->write_failures, 0);
32019 rc = 0;
32020 }
32021 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
32022@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
32023
32024 /* If this was a SCSI read/write command keep count of errors */
32025 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
32026- atomic_inc(&res->read_failures);
32027+ atomic_inc_unchecked(&res->read_failures);
32028 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
32029- atomic_inc(&res->write_failures);
32030+ atomic_inc_unchecked(&res->write_failures);
32031
32032 if (!RES_IS_GSCSI(res->cfg_entry) &&
32033 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
32034@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
32035 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32036 * hrrq_id assigned here in queuecommand
32037 */
32038- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32039+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32040 pinstance->num_hrrq;
32041 cmd->cmd_done = pmcraid_io_done;
32042
32043@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
32044 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32045 * hrrq_id assigned here in queuecommand
32046 */
32047- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32048+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32049 pinstance->num_hrrq;
32050
32051 if (request_size) {
32052@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
32053
32054 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
32055 /* add resources only after host is added into system */
32056- if (!atomic_read(&pinstance->expose_resources))
32057+ if (!atomic_read_unchecked(&pinstance->expose_resources))
32058 return;
32059
32060 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
32061@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
32062 init_waitqueue_head(&pinstance->reset_wait_q);
32063
32064 atomic_set(&pinstance->outstanding_cmds, 0);
32065- atomic_set(&pinstance->last_message_id, 0);
32066- atomic_set(&pinstance->expose_resources, 0);
32067+ atomic_set_unchecked(&pinstance->last_message_id, 0);
32068+ atomic_set_unchecked(&pinstance->expose_resources, 0);
32069
32070 INIT_LIST_HEAD(&pinstance->free_res_q);
32071 INIT_LIST_HEAD(&pinstance->used_res_q);
32072@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
32073 /* Schedule worker thread to handle CCN and take care of adding and
32074 * removing devices to OS
32075 */
32076- atomic_set(&pinstance->expose_resources, 1);
32077+ atomic_set_unchecked(&pinstance->expose_resources, 1);
32078 schedule_work(&pinstance->worker_q);
32079 return rc;
32080
32081diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
32082--- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
32083+++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
32084@@ -749,7 +749,7 @@ struct pmcraid_instance {
32085 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
32086
32087 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
32088- atomic_t last_message_id;
32089+ atomic_unchecked_t last_message_id;
32090
32091 /* configuration table */
32092 struct pmcraid_config_table *cfg_table;
32093@@ -778,7 +778,7 @@ struct pmcraid_instance {
32094 atomic_t outstanding_cmds;
32095
32096 /* should add/delete resources to mid-layer now ?*/
32097- atomic_t expose_resources;
32098+ atomic_unchecked_t expose_resources;
32099
32100
32101
32102@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
32103 struct pmcraid_config_table_entry_ext cfg_entry_ext;
32104 };
32105 struct scsi_device *scsi_dev; /* Link scsi_device structure */
32106- atomic_t read_failures; /* count of failed READ commands */
32107- atomic_t write_failures; /* count of failed WRITE commands */
32108+ atomic_unchecked_t read_failures; /* count of failed READ commands */
32109+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
32110
32111 /* To indicate add/delete/modify during CCN */
32112 u8 change_detected;
32113diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
32114--- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
32115+++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
32116@@ -2244,7 +2244,7 @@ struct isp_operations {
32117 int (*get_flash_version) (struct scsi_qla_host *, void *);
32118 int (*start_scsi) (srb_t *);
32119 int (*abort_isp) (struct scsi_qla_host *);
32120-};
32121+} __no_const;
32122
32123 /* MSI-X Support *************************************************************/
32124
32125diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
32126--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
32127+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
32128@@ -256,7 +256,7 @@ struct ddb_entry {
32129 atomic_t retry_relogin_timer; /* Min Time between relogins
32130 * (4000 only) */
32131 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
32132- atomic_t relogin_retry_count; /* Num of times relogin has been
32133+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
32134 * retried */
32135
32136 uint16_t port;
32137diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
32138--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
32139+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
32140@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
32141 ddb_entry->fw_ddb_index = fw_ddb_index;
32142 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
32143 atomic_set(&ddb_entry->relogin_timer, 0);
32144- atomic_set(&ddb_entry->relogin_retry_count, 0);
32145+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32146 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32147 list_add_tail(&ddb_entry->list, &ha->ddb_list);
32148 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
32149@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
32150 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
32151 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
32152 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32153- atomic_set(&ddb_entry->relogin_retry_count, 0);
32154+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32155 atomic_set(&ddb_entry->relogin_timer, 0);
32156 clear_bit(DF_RELOGIN, &ddb_entry->flags);
32157 iscsi_unblock_session(ddb_entry->sess);
32158diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
32159--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
32160+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
32161@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
32162 ddb_entry->fw_ddb_device_state ==
32163 DDB_DS_SESSION_FAILED) {
32164 /* Reset retry relogin timer */
32165- atomic_inc(&ddb_entry->relogin_retry_count);
32166+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
32167 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
32168 " timed out-retrying"
32169 " relogin (%d)\n",
32170 ha->host_no,
32171 ddb_entry->fw_ddb_index,
32172- atomic_read(&ddb_entry->
32173+ atomic_read_unchecked(&ddb_entry->
32174 relogin_retry_count))
32175 );
32176 start_dpc++;
32177diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
32178--- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
32179+++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
32180@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
32181 unsigned long timeout;
32182 int rtn = 0;
32183
32184- atomic_inc(&cmd->device->iorequest_cnt);
32185+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32186
32187 /* check if the device is still usable */
32188 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
32189diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
32190--- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
32191+++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
32192@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
32193 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
32194 unsigned char *cmd = (unsigned char *)scp->cmnd;
32195
32196+ pax_track_stack();
32197+
32198 if ((errsts = check_readiness(scp, 1, devip)))
32199 return errsts;
32200 memset(arr, 0, sizeof(arr));
32201@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
32202 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
32203 unsigned char *cmd = (unsigned char *)scp->cmnd;
32204
32205+ pax_track_stack();
32206+
32207 if ((errsts = check_readiness(scp, 1, devip)))
32208 return errsts;
32209 memset(arr, 0, sizeof(arr));
32210diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
32211--- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
32212+++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
32213@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
32214 shost = sdev->host;
32215 scsi_init_cmd_errh(cmd);
32216 cmd->result = DID_NO_CONNECT << 16;
32217- atomic_inc(&cmd->device->iorequest_cnt);
32218+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32219
32220 /*
32221 * SCSI request completion path will do scsi_device_unbusy(),
32222@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
32223
32224 INIT_LIST_HEAD(&cmd->eh_entry);
32225
32226- atomic_inc(&cmd->device->iodone_cnt);
32227+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
32228 if (cmd->result)
32229- atomic_inc(&cmd->device->ioerr_cnt);
32230+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
32231
32232 disposition = scsi_decide_disposition(cmd);
32233 if (disposition != SUCCESS &&
32234diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
32235--- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
32236+++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
32237@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
32238 char *buf) \
32239 { \
32240 struct scsi_device *sdev = to_scsi_device(dev); \
32241- unsigned long long count = atomic_read(&sdev->field); \
32242+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
32243 return snprintf(buf, 20, "0x%llx\n", count); \
32244 } \
32245 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
32246diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
32247--- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
32248+++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
32249@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
32250 * Netlink Infrastructure
32251 */
32252
32253-static atomic_t fc_event_seq;
32254+static atomic_unchecked_t fc_event_seq;
32255
32256 /**
32257 * fc_get_event_number - Obtain the next sequential FC event number
32258@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
32259 u32
32260 fc_get_event_number(void)
32261 {
32262- return atomic_add_return(1, &fc_event_seq);
32263+ return atomic_add_return_unchecked(1, &fc_event_seq);
32264 }
32265 EXPORT_SYMBOL(fc_get_event_number);
32266
32267@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
32268 {
32269 int error;
32270
32271- atomic_set(&fc_event_seq, 0);
32272+ atomic_set_unchecked(&fc_event_seq, 0);
32273
32274 error = transport_class_register(&fc_host_class);
32275 if (error)
32276@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
32277 char *cp;
32278
32279 *val = simple_strtoul(buf, &cp, 0);
32280- if ((*cp && (*cp != '\n')) || (*val < 0))
32281+ if (*cp && (*cp != '\n'))
32282 return -EINVAL;
32283 /*
32284 * Check for overflow; dev_loss_tmo is u32
32285diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
32286--- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
32287+++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
32288@@ -83,7 +83,7 @@ struct iscsi_internal {
32289 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
32290 };
32291
32292-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
32293+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
32294 static struct workqueue_struct *iscsi_eh_timer_workq;
32295
32296 /*
32297@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
32298 int err;
32299
32300 ihost = shost->shost_data;
32301- session->sid = atomic_add_return(1, &iscsi_session_nr);
32302+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
32303
32304 if (id == ISCSI_MAX_TARGET) {
32305 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
32306@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
32307 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
32308 ISCSI_TRANSPORT_VERSION);
32309
32310- atomic_set(&iscsi_session_nr, 0);
32311+ atomic_set_unchecked(&iscsi_session_nr, 0);
32312
32313 err = class_register(&iscsi_transport_class);
32314 if (err)
32315diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
32316--- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
32317+++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
32318@@ -33,7 +33,7 @@
32319 #include "scsi_transport_srp_internal.h"
32320
32321 struct srp_host_attrs {
32322- atomic_t next_port_id;
32323+ atomic_unchecked_t next_port_id;
32324 };
32325 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
32326
32327@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
32328 struct Scsi_Host *shost = dev_to_shost(dev);
32329 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
32330
32331- atomic_set(&srp_host->next_port_id, 0);
32332+ atomic_set_unchecked(&srp_host->next_port_id, 0);
32333 return 0;
32334 }
32335
32336@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
32337 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
32338 rport->roles = ids->roles;
32339
32340- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
32341+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
32342 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
32343
32344 transport_setup_device(&rport->dev);
32345diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
32346--- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
32347+++ linux-3.0.4/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
32348@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
32349 const struct file_operations * fops;
32350 };
32351
32352-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
32353+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
32354 {"allow_dio", &adio_fops},
32355 {"debug", &debug_fops},
32356 {"def_reserved_size", &dressz_fops},
32357@@ -2325,7 +2325,7 @@ sg_proc_init(void)
32358 {
32359 int k, mask;
32360 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
32361- struct sg_proc_leaf * leaf;
32362+ const struct sg_proc_leaf * leaf;
32363
32364 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
32365 if (!sg_proc_sgp)
32366diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
32367--- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
32368+++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
32369@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
32370 int do_iounmap = 0;
32371 int do_disable_device = 1;
32372
32373+ pax_track_stack();
32374+
32375 memset(&sym_dev, 0, sizeof(sym_dev));
32376 memset(&nvram, 0, sizeof(nvram));
32377 sym_dev.pdev = pdev;
32378diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
32379--- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
32380+++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
32381@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
32382 dma_addr_t base;
32383 unsigned i;
32384
32385+ pax_track_stack();
32386+
32387 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
32388 cmd.reqRingNumPages = adapter->req_pages;
32389 cmd.cmpRingNumPages = adapter->cmp_pages;
32390diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
32391--- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
32392+++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
32393@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
32394 EXPORT_SYMBOL_GPL(spi_bus_unlock);
32395
32396 /* portable code must never pass more than 32 bytes */
32397-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
32398+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
32399
32400 static u8 *buf;
32401
32402diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
32403--- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
32404+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
32405@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
32406 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
32407
32408
32409-static struct net_device_ops ar6000_netdev_ops = {
32410+static net_device_ops_no_const ar6000_netdev_ops = {
32411 .ndo_init = NULL,
32412 .ndo_open = ar6000_open,
32413 .ndo_stop = ar6000_close,
32414diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
32415--- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
32416+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
32417@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
32418 typedef struct ar6k_pal_config_s
32419 {
32420 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32421-}ar6k_pal_config_t;
32422+} __no_const ar6k_pal_config_t;
32423
32424 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32425 #endif /* _AR6K_PAL_H_ */
32426diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32427--- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
32428+++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
32429@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32430 free_netdev(ifp->net);
32431 }
32432 /* Allocate etherdev, including space for private structure */
32433- ifp->net = alloc_etherdev(sizeof(dhd));
32434+ ifp->net = alloc_etherdev(sizeof(*dhd));
32435 if (!ifp->net) {
32436 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32437 ret = -ENOMEM;
32438 }
32439 if (ret == 0) {
32440 strcpy(ifp->net->name, ifp->name);
32441- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32442+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32443 err = dhd_net_attach(&dhd->pub, ifp->idx);
32444 if (err != 0) {
32445 DHD_ERROR(("%s: dhd_net_attach failed, "
32446@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32447 strcpy(nv_path, nvram_path);
32448
32449 /* Allocate etherdev, including space for private structure */
32450- net = alloc_etherdev(sizeof(dhd));
32451+ net = alloc_etherdev(sizeof(*dhd));
32452 if (!net) {
32453 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32454 goto fail;
32455@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32456 /*
32457 * Save the dhd_info into the priv
32458 */
32459- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32460+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32461
32462 /* Set network interface name if it was provided as module parameter */
32463 if (iface_name[0]) {
32464@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32465 /*
32466 * Save the dhd_info into the priv
32467 */
32468- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32469+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32470
32471 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32472 g_bus = bus;
32473diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
32474--- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
32475+++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
32476@@ -593,7 +593,7 @@ struct phy_func_ptr {
32477 initfn_t carrsuppr;
32478 rxsigpwrfn_t rxsigpwr;
32479 detachfn_t detach;
32480-};
32481+} __no_const;
32482 typedef struct phy_func_ptr phy_func_ptr_t;
32483
32484 struct phy_info {
32485diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
32486--- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
32487+++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
32488@@ -185,7 +185,7 @@ typedef struct {
32489 u16 func, uint bustype, void *regsva, void *param);
32490 /* detach from device */
32491 void (*detach) (void *ch);
32492-} bcmsdh_driver_t;
32493+} __no_const bcmsdh_driver_t;
32494
32495 /* platform specific/high level functions */
32496 extern int bcmsdh_register(bcmsdh_driver_t *driver);
32497diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
32498--- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
32499+++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
32500@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32501 struct net_device_stats *stats = &etdev->net_stats;
32502
32503 if (tcb->flags & fMP_DEST_BROAD)
32504- atomic_inc(&etdev->Stats.brdcstxmt);
32505+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32506 else if (tcb->flags & fMP_DEST_MULTI)
32507- atomic_inc(&etdev->Stats.multixmt);
32508+ atomic_inc_unchecked(&etdev->Stats.multixmt);
32509 else
32510- atomic_inc(&etdev->Stats.unixmt);
32511+ atomic_inc_unchecked(&etdev->Stats.unixmt);
32512
32513 if (tcb->skb) {
32514 stats->tx_bytes += tcb->skb->len;
32515diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
32516--- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
32517+++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
32518@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32519 * operations
32520 */
32521 u32 unircv; /* # multicast packets received */
32522- atomic_t unixmt; /* # multicast packets for Tx */
32523+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32524 u32 multircv; /* # multicast packets received */
32525- atomic_t multixmt; /* # multicast packets for Tx */
32526+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32527 u32 brdcstrcv; /* # broadcast packets received */
32528- atomic_t brdcstxmt; /* # broadcast packets for Tx */
32529+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32530 u32 norcvbuf; /* # Rx packets discarded */
32531 u32 noxmtbuf; /* # Tx packets discarded */
32532
32533diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
32534--- linux-3.0.4/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
32535+++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
32536@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32537 int ret = 0;
32538 int t;
32539
32540- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32541- atomic_inc(&vmbus_connection.next_gpadl_handle);
32542+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32543+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32544
32545 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32546 if (ret)
32547diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
32548--- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
32549+++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32550@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32551 u64 output_address = (output) ? virt_to_phys(output) : 0;
32552 u32 output_address_hi = output_address >> 32;
32553 u32 output_address_lo = output_address & 0xFFFFFFFF;
32554- volatile void *hypercall_page = hv_context.hypercall_page;
32555+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32556
32557 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32558 "=a"(hv_status_lo) : "d" (control_hi),
32559diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32560--- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32561+++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32562@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32563 if (hid_dev) {
32564 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32565
32566- hid_dev->ll_driver->open = mousevsc_hid_open;
32567- hid_dev->ll_driver->close = mousevsc_hid_close;
32568+ pax_open_kernel();
32569+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32570+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32571+ pax_close_kernel();
32572
32573 hid_dev->bus = BUS_VIRTUAL;
32574 hid_dev->vendor = input_device_ctx->device_info.vendor;
32575diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32576--- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32577+++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32578@@ -559,7 +559,7 @@ enum vmbus_connect_state {
32579 struct vmbus_connection {
32580 enum vmbus_connect_state conn_state;
32581
32582- atomic_t next_gpadl_handle;
32583+ atomic_unchecked_t next_gpadl_handle;
32584
32585 /*
32586 * Represents channel interrupts. Each bit position represents a
32587diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32588--- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
32589+++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32590@@ -43,7 +43,7 @@ struct rndis_device {
32591
32592 enum rndis_device_state state;
32593 u32 link_stat;
32594- atomic_t new_req_id;
32595+ atomic_unchecked_t new_req_id;
32596
32597 spinlock_t request_lock;
32598 struct list_head req_list;
32599@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32600 * template
32601 */
32602 set = &rndis_msg->msg.set_req;
32603- set->req_id = atomic_inc_return(&dev->new_req_id);
32604+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32605
32606 /* Add to the request list */
32607 spin_lock_irqsave(&dev->request_lock, flags);
32608@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32609
32610 /* Setup the rndis set */
32611 halt = &request->request_msg.msg.halt_req;
32612- halt->req_id = atomic_inc_return(&dev->new_req_id);
32613+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32614
32615 /* Ignore return since this msg is optional. */
32616 rndis_filter_send_request(dev, request);
32617diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32618--- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32619+++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32620@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32621 {
32622 int ret = 0;
32623
32624- static atomic_t device_num = ATOMIC_INIT(0);
32625+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32626
32627 /* Set the device name. Otherwise, device_register() will fail. */
32628 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32629- atomic_inc_return(&device_num));
32630+ atomic_inc_return_unchecked(&device_num));
32631
32632 /* The new device belongs to this bus */
32633 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32634diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32635--- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32636+++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32637@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32638
32639 int (*is_enabled)(struct iio_ring_buffer *ring);
32640 int (*enable)(struct iio_ring_buffer *ring);
32641-};
32642+} __no_const;
32643
32644 struct iio_ring_setup_ops {
32645 int (*preenable)(struct iio_dev *);
32646diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32647--- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32648+++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32649@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32650 * since the RX tasklet also increments it.
32651 */
32652 #ifdef CONFIG_64BIT
32653- atomic64_add(rx_status.dropped_packets,
32654- (atomic64_t *)&priv->stats.rx_dropped);
32655+ atomic64_add_unchecked(rx_status.dropped_packets,
32656+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32657 #else
32658- atomic_add(rx_status.dropped_packets,
32659- (atomic_t *)&priv->stats.rx_dropped);
32660+ atomic_add_unchecked(rx_status.dropped_packets,
32661+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32662 #endif
32663 }
32664
32665diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32666--- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32667+++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32668@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32669 /* Increment RX stats for virtual ports */
32670 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32671 #ifdef CONFIG_64BIT
32672- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32673- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32674+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32675+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32676 #else
32677- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32678- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32679+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32680+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32681 #endif
32682 }
32683 netif_receive_skb(skb);
32684@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32685 dev->name);
32686 */
32687 #ifdef CONFIG_64BIT
32688- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32689+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32690 #else
32691- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32692+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32693 #endif
32694 dev_kfree_skb_irq(skb);
32695 }
32696diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32697--- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32698+++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32699@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32700 mutex_init(&psb->mcache_lock);
32701 psb->mcache_root = RB_ROOT;
32702 psb->mcache_timeout = msecs_to_jiffies(5000);
32703- atomic_long_set(&psb->mcache_gen, 0);
32704+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32705
32706 psb->trans_max_pages = 100;
32707
32708@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32709 INIT_LIST_HEAD(&psb->crypto_ready_list);
32710 INIT_LIST_HEAD(&psb->crypto_active_list);
32711
32712- atomic_set(&psb->trans_gen, 1);
32713+ atomic_set_unchecked(&psb->trans_gen, 1);
32714 atomic_long_set(&psb->total_inodes, 0);
32715
32716 mutex_init(&psb->state_lock);
32717diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32718--- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32719+++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32720@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32721 m->data = data;
32722 m->start = start;
32723 m->size = size;
32724- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32725+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32726
32727 mutex_lock(&psb->mcache_lock);
32728 err = pohmelfs_mcache_insert(psb, m);
32729diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32730--- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32731+++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32732@@ -571,14 +571,14 @@ struct pohmelfs_config;
32733 struct pohmelfs_sb {
32734 struct rb_root mcache_root;
32735 struct mutex mcache_lock;
32736- atomic_long_t mcache_gen;
32737+ atomic_long_unchecked_t mcache_gen;
32738 unsigned long mcache_timeout;
32739
32740 unsigned int idx;
32741
32742 unsigned int trans_retries;
32743
32744- atomic_t trans_gen;
32745+ atomic_unchecked_t trans_gen;
32746
32747 unsigned int crypto_attached_size;
32748 unsigned int crypto_align_size;
32749diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32750--- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32751+++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32752@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32753 int err;
32754 struct netfs_cmd *cmd = t->iovec.iov_base;
32755
32756- t->gen = atomic_inc_return(&psb->trans_gen);
32757+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32758
32759 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32760 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32761diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32762--- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32763+++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32764@@ -83,7 +83,7 @@ struct _io_ops {
32765 u8 *pmem);
32766 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32767 u8 *pmem);
32768-};
32769+} __no_const;
32770
32771 struct io_req {
32772 struct list_head list;
32773diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32774--- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32775+++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32776@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32777 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32778
32779 if (rlen)
32780- if (copy_to_user(data, &resp, rlen))
32781+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32782 return -EFAULT;
32783
32784 return 0;
32785diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32786--- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32787+++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32788@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32789 struct stlport stl_dummyport;
32790 struct stlport *portp;
32791
32792+ pax_track_stack();
32793+
32794 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32795 return -EFAULT;
32796 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32797diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32798--- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32799+++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32800@@ -315,7 +315,7 @@ struct usbip_device {
32801 void (*shutdown)(struct usbip_device *);
32802 void (*reset)(struct usbip_device *);
32803 void (*unusable)(struct usbip_device *);
32804- } eh_ops;
32805+ } __no_const eh_ops;
32806 };
32807
32808 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32809diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32810--- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32811+++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32812@@ -94,7 +94,7 @@ struct vhci_hcd {
32813 unsigned resuming:1;
32814 unsigned long re_timeout;
32815
32816- atomic_t seqnum;
32817+ atomic_unchecked_t seqnum;
32818
32819 /*
32820 * NOTE:
32821diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32822--- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
32823+++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32824@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32825 return;
32826 }
32827
32828- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32829+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32830 if (priv->seqnum == 0xffff)
32831 dev_info(&urb->dev->dev, "seqnum max\n");
32832
32833@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32834 return -ENOMEM;
32835 }
32836
32837- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32838+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32839 if (unlink->seqnum == 0xffff)
32840 pr_info("seqnum max\n");
32841
32842@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32843 vdev->rhport = rhport;
32844 }
32845
32846- atomic_set(&vhci->seqnum, 0);
32847+ atomic_set_unchecked(&vhci->seqnum, 0);
32848 spin_lock_init(&vhci->lock);
32849
32850 hcd->power_budget = 0; /* no limit */
32851diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32852--- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32853+++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32854@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32855 if (!urb) {
32856 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32857 pr_info("max seqnum %d\n",
32858- atomic_read(&the_controller->seqnum));
32859+ atomic_read_unchecked(&the_controller->seqnum));
32860 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32861 return;
32862 }
32863diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32864--- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32865+++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32866@@ -79,14 +79,13 @@ static int msglevel
32867 *
32868 */
32869
32870+static net_device_ops_no_const apdev_netdev_ops;
32871+
32872 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32873 {
32874 PSDevice apdev_priv;
32875 struct net_device *dev = pDevice->dev;
32876 int ret;
32877- const struct net_device_ops apdev_netdev_ops = {
32878- .ndo_start_xmit = pDevice->tx_80211,
32879- };
32880
32881 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32882
32883@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32884 *apdev_priv = *pDevice;
32885 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32886
32887+ /* only half broken now */
32888+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32889 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32890
32891 pDevice->apdev->type = ARPHRD_IEEE80211;
32892diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32893--- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32894+++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32895@@ -80,14 +80,13 @@ static int msglevel
32896 *
32897 */
32898
32899+static net_device_ops_no_const apdev_netdev_ops;
32900+
32901 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32902 {
32903 PSDevice apdev_priv;
32904 struct net_device *dev = pDevice->dev;
32905 int ret;
32906- const struct net_device_ops apdev_netdev_ops = {
32907- .ndo_start_xmit = pDevice->tx_80211,
32908- };
32909
32910 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32911
32912@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32913 *apdev_priv = *pDevice;
32914 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32915
32916+ /* only half broken now */
32917+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32918 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32919
32920 pDevice->apdev->type = ARPHRD_IEEE80211;
32921diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32922--- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32923+++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32924@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32925
32926 struct usbctlx_completor {
32927 int (*complete) (struct usbctlx_completor *);
32928-};
32929+} __no_const;
32930
32931 static int
32932 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32933diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32934--- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32935+++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32936@@ -39,7 +39,7 @@
32937 * A tmem host implementation must use this function to register callbacks
32938 * for memory allocation.
32939 */
32940-static struct tmem_hostops tmem_hostops;
32941+static tmem_hostops_no_const tmem_hostops;
32942
32943 static void tmem_objnode_tree_init(void);
32944
32945@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32946 * A tmem host implementation must use this function to register
32947 * callbacks for a page-accessible memory (PAM) implementation
32948 */
32949-static struct tmem_pamops tmem_pamops;
32950+static tmem_pamops_no_const tmem_pamops;
32951
32952 void tmem_register_pamops(struct tmem_pamops *m)
32953 {
32954diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32955--- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32956+++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32957@@ -171,6 +171,7 @@ struct tmem_pamops {
32958 int (*get_data)(struct page *, void *, struct tmem_pool *);
32959 void (*free)(void *, struct tmem_pool *);
32960 };
32961+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32962 extern void tmem_register_pamops(struct tmem_pamops *m);
32963
32964 /* memory allocation methods provided by the host implementation */
32965@@ -180,6 +181,7 @@ struct tmem_hostops {
32966 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32967 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32968 };
32969+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32970 extern void tmem_register_hostops(struct tmem_hostops *m);
32971
32972 /* core tmem accessor functions */
32973diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32974--- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32975+++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32976@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32977 char path[ALUA_METADATA_PATH_LEN];
32978 int len;
32979
32980+ pax_track_stack();
32981+
32982 memset(path, 0, ALUA_METADATA_PATH_LEN);
32983
32984 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32985@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32986 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32987 int len;
32988
32989+ pax_track_stack();
32990+
32991 memset(path, 0, ALUA_METADATA_PATH_LEN);
32992 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32993
32994diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32995--- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32996+++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32997@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32998 int length = 0;
32999 unsigned char buf[SE_MODE_PAGE_BUF];
33000
33001+ pax_track_stack();
33002+
33003 memset(buf, 0, SE_MODE_PAGE_BUF);
33004
33005 switch (cdb[2] & 0x3f) {
33006diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
33007--- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
33008+++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
33009@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
33010 ssize_t len = 0;
33011 int reg_count = 0, prf_isid;
33012
33013+ pax_track_stack();
33014+
33015 if (!(su_dev->se_dev_ptr))
33016 return -ENODEV;
33017
33018diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
33019--- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
33020+++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
33021@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
33022 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
33023 u16 tpgt;
33024
33025+ pax_track_stack();
33026+
33027 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
33028 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
33029 /*
33030@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
33031 ssize_t len = 0;
33032 int reg_count = 0;
33033
33034+ pax_track_stack();
33035+
33036 memset(buf, 0, pr_aptpl_buf_len);
33037 /*
33038 * Called to clear metadata once APTPL has been deactivated.
33039@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
33040 char path[512];
33041 int ret;
33042
33043+ pax_track_stack();
33044+
33045 memset(iov, 0, sizeof(struct iovec));
33046 memset(path, 0, 512);
33047
33048diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
33049--- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
33050+++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
33051@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
33052 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
33053 T_TASK(cmd)->t_task_cdbs,
33054 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33055- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33056+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33057 atomic_read(&T_TASK(cmd)->t_transport_active),
33058 atomic_read(&T_TASK(cmd)->t_transport_stop),
33059 atomic_read(&T_TASK(cmd)->t_transport_sent));
33060@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
33061 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
33062 " task: %p, t_fe_count: %d dev: %p\n", task,
33063 fe_count, dev);
33064- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33065+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33066 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
33067 flags);
33068 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33069@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
33070 }
33071 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
33072 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
33073- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33074+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33075 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
33076 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33077
33078diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
33079--- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
33080+++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
33081@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
33082
33083 dev->queue_depth = dev_limits->queue_depth;
33084 atomic_set(&dev->depth_left, dev->queue_depth);
33085- atomic_set(&dev->dev_ordered_id, 0);
33086+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
33087
33088 se_dev_set_default_attribs(dev, dev_limits);
33089
33090@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
33091 * Used to determine when ORDERED commands should go from
33092 * Dormant to Active status.
33093 */
33094- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
33095+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
33096 smp_mb__after_atomic_inc();
33097 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
33098 cmd->se_ordered_id, cmd->sam_task_attr,
33099@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
33100 " t_transport_active: %d t_transport_stop: %d"
33101 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
33102 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33103- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33104+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33105 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
33106 atomic_read(&T_TASK(cmd)->t_transport_active),
33107 atomic_read(&T_TASK(cmd)->t_transport_stop),
33108@@ -2673,9 +2673,9 @@ check_depth:
33109 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
33110 atomic_set(&task->task_active, 1);
33111 atomic_set(&task->task_sent, 1);
33112- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
33113+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
33114
33115- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
33116+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
33117 T_TASK(cmd)->t_task_cdbs)
33118 atomic_set(&cmd->transport_sent, 1);
33119
33120@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
33121 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
33122 }
33123 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
33124- atomic_read(&T_TASK(cmd)->t_transport_aborted))
33125+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
33126 goto remove;
33127
33128 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
33129@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
33130 {
33131 int ret = 0;
33132
33133- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
33134+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
33135 if (!(send_status) ||
33136 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
33137 return 1;
33138@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
33139 */
33140 if (cmd->data_direction == DMA_TO_DEVICE) {
33141 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
33142- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
33143+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
33144 smp_mb__after_atomic_inc();
33145 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
33146 transport_new_cmd_failure(cmd);
33147@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
33148 CMD_TFO(cmd)->get_task_tag(cmd),
33149 T_TASK(cmd)->t_task_cdbs,
33150 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33151- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33152+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33153 atomic_read(&T_TASK(cmd)->t_transport_active),
33154 atomic_read(&T_TASK(cmd)->t_transport_stop),
33155 atomic_read(&T_TASK(cmd)->t_transport_sent));
33156diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
33157--- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
33158+++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
33159@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
33160 bool mContinue;
33161 char *pIn, *pOut;
33162
33163+ pax_track_stack();
33164+
33165 if (!SCI_Prepare(j))
33166 return 0;
33167
33168diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
33169--- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
33170+++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
33171@@ -83,6 +83,7 @@
33172 #include <asm/hvcserver.h>
33173 #include <asm/uaccess.h>
33174 #include <asm/vio.h>
33175+#include <asm/local.h>
33176
33177 /*
33178 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
33179@@ -270,7 +271,7 @@ struct hvcs_struct {
33180 unsigned int index;
33181
33182 struct tty_struct *tty;
33183- int open_count;
33184+ local_t open_count;
33185
33186 /*
33187 * Used to tell the driver kernel_thread what operations need to take
33188@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
33189
33190 spin_lock_irqsave(&hvcsd->lock, flags);
33191
33192- if (hvcsd->open_count > 0) {
33193+ if (local_read(&hvcsd->open_count) > 0) {
33194 spin_unlock_irqrestore(&hvcsd->lock, flags);
33195 printk(KERN_INFO "HVCS: vterm state unchanged. "
33196 "The hvcs device node is still in use.\n");
33197@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
33198 if ((retval = hvcs_partner_connect(hvcsd)))
33199 goto error_release;
33200
33201- hvcsd->open_count = 1;
33202+ local_set(&hvcsd->open_count, 1);
33203 hvcsd->tty = tty;
33204 tty->driver_data = hvcsd;
33205
33206@@ -1179,7 +1180,7 @@ fast_open:
33207
33208 spin_lock_irqsave(&hvcsd->lock, flags);
33209 kref_get(&hvcsd->kref);
33210- hvcsd->open_count++;
33211+ local_inc(&hvcsd->open_count);
33212 hvcsd->todo_mask |= HVCS_SCHED_READ;
33213 spin_unlock_irqrestore(&hvcsd->lock, flags);
33214
33215@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
33216 hvcsd = tty->driver_data;
33217
33218 spin_lock_irqsave(&hvcsd->lock, flags);
33219- if (--hvcsd->open_count == 0) {
33220+ if (local_dec_and_test(&hvcsd->open_count)) {
33221
33222 vio_disable_interrupts(hvcsd->vdev);
33223
33224@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
33225 free_irq(irq, hvcsd);
33226 kref_put(&hvcsd->kref, destroy_hvcs_struct);
33227 return;
33228- } else if (hvcsd->open_count < 0) {
33229+ } else if (local_read(&hvcsd->open_count) < 0) {
33230 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
33231 " is missmanaged.\n",
33232- hvcsd->vdev->unit_address, hvcsd->open_count);
33233+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
33234 }
33235
33236 spin_unlock_irqrestore(&hvcsd->lock, flags);
33237@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
33238
33239 spin_lock_irqsave(&hvcsd->lock, flags);
33240 /* Preserve this so that we know how many kref refs to put */
33241- temp_open_count = hvcsd->open_count;
33242+ temp_open_count = local_read(&hvcsd->open_count);
33243
33244 /*
33245 * Don't kref put inside the spinlock because the destruction
33246@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
33247 hvcsd->tty->driver_data = NULL;
33248 hvcsd->tty = NULL;
33249
33250- hvcsd->open_count = 0;
33251+ local_set(&hvcsd->open_count, 0);
33252
33253 /* This will drop any buffered data on the floor which is OK in a hangup
33254 * scenario. */
33255@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
33256 * the middle of a write operation? This is a crummy place to do this
33257 * but we want to keep it all in the spinlock.
33258 */
33259- if (hvcsd->open_count <= 0) {
33260+ if (local_read(&hvcsd->open_count) <= 0) {
33261 spin_unlock_irqrestore(&hvcsd->lock, flags);
33262 return -ENODEV;
33263 }
33264@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
33265 {
33266 struct hvcs_struct *hvcsd = tty->driver_data;
33267
33268- if (!hvcsd || hvcsd->open_count <= 0)
33269+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
33270 return 0;
33271
33272 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
33273diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
33274--- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
33275+++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
33276@@ -29,6 +29,7 @@
33277 #include <linux/tty_driver.h>
33278 #include <linux/tty_flip.h>
33279 #include <linux/uaccess.h>
33280+#include <asm/local.h>
33281
33282 #include "tty.h"
33283 #include "network.h"
33284@@ -51,7 +52,7 @@ struct ipw_tty {
33285 int tty_type;
33286 struct ipw_network *network;
33287 struct tty_struct *linux_tty;
33288- int open_count;
33289+ local_t open_count;
33290 unsigned int control_lines;
33291 struct mutex ipw_tty_mutex;
33292 int tx_bytes_queued;
33293@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
33294 mutex_unlock(&tty->ipw_tty_mutex);
33295 return -ENODEV;
33296 }
33297- if (tty->open_count == 0)
33298+ if (local_read(&tty->open_count) == 0)
33299 tty->tx_bytes_queued = 0;
33300
33301- tty->open_count++;
33302+ local_inc(&tty->open_count);
33303
33304 tty->linux_tty = linux_tty;
33305 linux_tty->driver_data = tty;
33306@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
33307
33308 static void do_ipw_close(struct ipw_tty *tty)
33309 {
33310- tty->open_count--;
33311-
33312- if (tty->open_count == 0) {
33313+ if (local_dec_return(&tty->open_count) == 0) {
33314 struct tty_struct *linux_tty = tty->linux_tty;
33315
33316 if (linux_tty != NULL) {
33317@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
33318 return;
33319
33320 mutex_lock(&tty->ipw_tty_mutex);
33321- if (tty->open_count == 0) {
33322+ if (local_read(&tty->open_count) == 0) {
33323 mutex_unlock(&tty->ipw_tty_mutex);
33324 return;
33325 }
33326@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
33327 return;
33328 }
33329
33330- if (!tty->open_count) {
33331+ if (!local_read(&tty->open_count)) {
33332 mutex_unlock(&tty->ipw_tty_mutex);
33333 return;
33334 }
33335@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
33336 return -ENODEV;
33337
33338 mutex_lock(&tty->ipw_tty_mutex);
33339- if (!tty->open_count) {
33340+ if (!local_read(&tty->open_count)) {
33341 mutex_unlock(&tty->ipw_tty_mutex);
33342 return -EINVAL;
33343 }
33344@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
33345 if (!tty)
33346 return -ENODEV;
33347
33348- if (!tty->open_count)
33349+ if (!local_read(&tty->open_count))
33350 return -EINVAL;
33351
33352 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33353@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
33354 if (!tty)
33355 return 0;
33356
33357- if (!tty->open_count)
33358+ if (!local_read(&tty->open_count))
33359 return 0;
33360
33361 return tty->tx_bytes_queued;
33362@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
33363 if (!tty)
33364 return -ENODEV;
33365
33366- if (!tty->open_count)
33367+ if (!local_read(&tty->open_count))
33368 return -EINVAL;
33369
33370 return get_control_lines(tty);
33371@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
33372 if (!tty)
33373 return -ENODEV;
33374
33375- if (!tty->open_count)
33376+ if (!local_read(&tty->open_count))
33377 return -EINVAL;
33378
33379 return set_control_lines(tty, set, clear);
33380@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
33381 if (!tty)
33382 return -ENODEV;
33383
33384- if (!tty->open_count)
33385+ if (!local_read(&tty->open_count))
33386 return -EINVAL;
33387
33388 /* FIXME: Exactly how is the tty object locked here .. */
33389@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
33390 against a parallel ioctl etc */
33391 mutex_lock(&ttyj->ipw_tty_mutex);
33392 }
33393- while (ttyj->open_count)
33394+ while (local_read(&ttyj->open_count))
33395 do_ipw_close(ttyj);
33396 ipwireless_disassociate_network_ttys(network,
33397 ttyj->channel_idx);
33398diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
33399--- linux-3.0.4/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
33400+++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
33401@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
33402 return NULL;
33403 spin_lock_init(&dlci->lock);
33404 dlci->fifo = &dlci->_fifo;
33405- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
33406+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
33407 kfree(dlci);
33408 return NULL;
33409 }
33410diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
33411--- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
33412+++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
33413@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
33414 {
33415 *ops = tty_ldisc_N_TTY;
33416 ops->owner = NULL;
33417- ops->refcount = ops->flags = 0;
33418+ atomic_set(&ops->refcount, 0);
33419+ ops->flags = 0;
33420 }
33421 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
33422diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
33423--- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
33424+++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
33425@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
33426 register_sysctl_table(pty_root_table);
33427
33428 /* Now create the /dev/ptmx special device */
33429+ pax_open_kernel();
33430 tty_default_fops(&ptmx_fops);
33431- ptmx_fops.open = ptmx_open;
33432+ *(void **)&ptmx_fops.open = ptmx_open;
33433+ pax_close_kernel();
33434
33435 cdev_init(&ptmx_cdev, &ptmx_fops);
33436 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33437diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
33438--- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
33439+++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
33440@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
33441 struct rocket_ports tmp;
33442 int board;
33443
33444+ pax_track_stack();
33445+
33446 if (!retports)
33447 return -EFAULT;
33448 memset(&tmp, 0, sizeof (tmp));
33449diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
33450--- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
33451+++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
33452@@ -23,8 +23,9 @@
33453 #define MAX_CONFIG_LEN 40
33454
33455 static struct kgdb_io kgdboc_io_ops;
33456+static struct kgdb_io kgdboc_io_ops_console;
33457
33458-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33459+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33460 static int configured = -1;
33461
33462 static char config[MAX_CONFIG_LEN];
33463@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33464 kgdboc_unregister_kbd();
33465 if (configured == 1)
33466 kgdb_unregister_io_module(&kgdboc_io_ops);
33467+ else if (configured == 2)
33468+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
33469 }
33470
33471 static int configure_kgdboc(void)
33472@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33473 int err;
33474 char *cptr = config;
33475 struct console *cons;
33476+ int is_console = 0;
33477
33478 err = kgdboc_option_setup(config);
33479 if (err || !strlen(config) || isspace(config[0]))
33480 goto noconfig;
33481
33482 err = -ENODEV;
33483- kgdboc_io_ops.is_console = 0;
33484 kgdb_tty_driver = NULL;
33485
33486 kgdboc_use_kms = 0;
33487@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33488 int idx;
33489 if (cons->device && cons->device(cons, &idx) == p &&
33490 idx == tty_line) {
33491- kgdboc_io_ops.is_console = 1;
33492+ is_console = 1;
33493 break;
33494 }
33495 cons = cons->next;
33496@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33497 kgdb_tty_line = tty_line;
33498
33499 do_register:
33500- err = kgdb_register_io_module(&kgdboc_io_ops);
33501+ if (is_console) {
33502+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
33503+ configured = 2;
33504+ } else {
33505+ err = kgdb_register_io_module(&kgdboc_io_ops);
33506+ configured = 1;
33507+ }
33508 if (err)
33509 goto noconfig;
33510
33511- configured = 1;
33512-
33513 return 0;
33514
33515 noconfig:
33516@@ -212,7 +219,7 @@ noconfig:
33517 static int __init init_kgdboc(void)
33518 {
33519 /* Already configured? */
33520- if (configured == 1)
33521+ if (configured >= 1)
33522 return 0;
33523
33524 return configure_kgdboc();
33525@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33526 if (config[len - 1] == '\n')
33527 config[len - 1] = '\0';
33528
33529- if (configured == 1)
33530+ if (configured >= 1)
33531 cleanup_kgdboc();
33532
33533 /* Go and configure with the new params. */
33534@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33535 .post_exception = kgdboc_post_exp_handler,
33536 };
33537
33538+static struct kgdb_io kgdboc_io_ops_console = {
33539+ .name = "kgdboc",
33540+ .read_char = kgdboc_get_char,
33541+ .write_char = kgdboc_put_char,
33542+ .pre_exception = kgdboc_pre_exp_handler,
33543+ .post_exception = kgdboc_post_exp_handler,
33544+ .is_console = 1
33545+};
33546+
33547 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33548 /* This is only available if kgdboc is a built in for early debugging */
33549 static int __init kgdboc_early_init(char *opt)
33550diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
33551--- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33552+++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33553@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33554 int loop = 1, num, total = 0;
33555 u8 recv_buf[512], *pbuf;
33556
33557+ pax_track_stack();
33558+
33559 pbuf = recv_buf;
33560 do {
33561 num = max3110_read_multi(max, pbuf);
33562diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33563--- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33564+++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33565@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33566
33567 void tty_default_fops(struct file_operations *fops)
33568 {
33569- *fops = tty_fops;
33570+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33571 }
33572
33573 /*
33574diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33575--- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33576+++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33577@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33578 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33579 struct tty_ldisc_ops *ldo = ld->ops;
33580
33581- ldo->refcount--;
33582+ atomic_dec(&ldo->refcount);
33583 module_put(ldo->owner);
33584 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33585
33586@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33587 spin_lock_irqsave(&tty_ldisc_lock, flags);
33588 tty_ldiscs[disc] = new_ldisc;
33589 new_ldisc->num = disc;
33590- new_ldisc->refcount = 0;
33591+ atomic_set(&new_ldisc->refcount, 0);
33592 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33593
33594 return ret;
33595@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33596 return -EINVAL;
33597
33598 spin_lock_irqsave(&tty_ldisc_lock, flags);
33599- if (tty_ldiscs[disc]->refcount)
33600+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33601 ret = -EBUSY;
33602 else
33603 tty_ldiscs[disc] = NULL;
33604@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33605 if (ldops) {
33606 ret = ERR_PTR(-EAGAIN);
33607 if (try_module_get(ldops->owner)) {
33608- ldops->refcount++;
33609+ atomic_inc(&ldops->refcount);
33610 ret = ldops;
33611 }
33612 }
33613@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33614 unsigned long flags;
33615
33616 spin_lock_irqsave(&tty_ldisc_lock, flags);
33617- ldops->refcount--;
33618+ atomic_dec(&ldops->refcount);
33619 module_put(ldops->owner);
33620 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33621 }
33622diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33623--- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33624+++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33625@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33626 kbd->kbdmode == VC_OFF) &&
33627 value != KVAL(K_SAK))
33628 return; /* SAK is allowed even in raw mode */
33629+
33630+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33631+ {
33632+ void *func = fn_handler[value];
33633+ if (func == fn_show_state || func == fn_show_ptregs ||
33634+ func == fn_show_mem)
33635+ return;
33636+ }
33637+#endif
33638+
33639 fn_handler[value](vc);
33640 }
33641
33642diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33643--- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33644+++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33645@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33646
33647 static void notify_write(struct vc_data *vc, unsigned int unicode)
33648 {
33649- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33650+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33651 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33652 }
33653
33654diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33655--- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33656+++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33657@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33658 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33659 return -EFAULT;
33660
33661- if (!capable(CAP_SYS_TTY_CONFIG))
33662- perm = 0;
33663-
33664 switch (cmd) {
33665 case KDGKBENT:
33666 key_map = key_maps[s];
33667@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33668 val = (i ? K_HOLE : K_NOSUCHMAP);
33669 return put_user(val, &user_kbe->kb_value);
33670 case KDSKBENT:
33671+ if (!capable(CAP_SYS_TTY_CONFIG))
33672+ perm = 0;
33673+
33674 if (!perm)
33675 return -EPERM;
33676 if (!i && v == K_NOSUCHMAP) {
33677@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33678 int i, j, k;
33679 int ret;
33680
33681- if (!capable(CAP_SYS_TTY_CONFIG))
33682- perm = 0;
33683-
33684 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33685 if (!kbs) {
33686 ret = -ENOMEM;
33687@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33688 kfree(kbs);
33689 return ((p && *p) ? -EOVERFLOW : 0);
33690 case KDSKBSENT:
33691+ if (!capable(CAP_SYS_TTY_CONFIG))
33692+ perm = 0;
33693+
33694 if (!perm) {
33695 ret = -EPERM;
33696 goto reterr;
33697diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33698--- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33699+++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33700@@ -25,6 +25,7 @@
33701 #include <linux/kobject.h>
33702 #include <linux/cdev.h>
33703 #include <linux/uio_driver.h>
33704+#include <asm/local.h>
33705
33706 #define UIO_MAX_DEVICES (1U << MINORBITS)
33707
33708@@ -32,10 +33,10 @@ struct uio_device {
33709 struct module *owner;
33710 struct device *dev;
33711 int minor;
33712- atomic_t event;
33713+ atomic_unchecked_t event;
33714 struct fasync_struct *async_queue;
33715 wait_queue_head_t wait;
33716- int vma_count;
33717+ local_t vma_count;
33718 struct uio_info *info;
33719 struct kobject *map_dir;
33720 struct kobject *portio_dir;
33721@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33722 struct device_attribute *attr, char *buf)
33723 {
33724 struct uio_device *idev = dev_get_drvdata(dev);
33725- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33726+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33727 }
33728
33729 static struct device_attribute uio_class_attributes[] = {
33730@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33731 {
33732 struct uio_device *idev = info->uio_dev;
33733
33734- atomic_inc(&idev->event);
33735+ atomic_inc_unchecked(&idev->event);
33736 wake_up_interruptible(&idev->wait);
33737 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33738 }
33739@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33740 }
33741
33742 listener->dev = idev;
33743- listener->event_count = atomic_read(&idev->event);
33744+ listener->event_count = atomic_read_unchecked(&idev->event);
33745 filep->private_data = listener;
33746
33747 if (idev->info->open) {
33748@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33749 return -EIO;
33750
33751 poll_wait(filep, &idev->wait, wait);
33752- if (listener->event_count != atomic_read(&idev->event))
33753+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33754 return POLLIN | POLLRDNORM;
33755 return 0;
33756 }
33757@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33758 do {
33759 set_current_state(TASK_INTERRUPTIBLE);
33760
33761- event_count = atomic_read(&idev->event);
33762+ event_count = atomic_read_unchecked(&idev->event);
33763 if (event_count != listener->event_count) {
33764 if (copy_to_user(buf, &event_count, count))
33765 retval = -EFAULT;
33766@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33767 static void uio_vma_open(struct vm_area_struct *vma)
33768 {
33769 struct uio_device *idev = vma->vm_private_data;
33770- idev->vma_count++;
33771+ local_inc(&idev->vma_count);
33772 }
33773
33774 static void uio_vma_close(struct vm_area_struct *vma)
33775 {
33776 struct uio_device *idev = vma->vm_private_data;
33777- idev->vma_count--;
33778+ local_dec(&idev->vma_count);
33779 }
33780
33781 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33782@@ -823,7 +824,7 @@ int __uio_register_device(struct module
33783 idev->owner = owner;
33784 idev->info = info;
33785 init_waitqueue_head(&idev->wait);
33786- atomic_set(&idev->event, 0);
33787+ atomic_set_unchecked(&idev->event, 0);
33788
33789 ret = uio_get_minor(idev);
33790 if (ret)
33791diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33792--- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33793+++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33794@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33795 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33796 if (ret < 2)
33797 return -EINVAL;
33798- if (index < 0 || index > 0x7f)
33799+ if (index > 0x7f)
33800 return -EINVAL;
33801 pos += tmp;
33802
33803diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33804--- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33805+++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33806@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33807 if (printk_ratelimit())
33808 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33809 __func__, vpi, vci);
33810- atomic_inc(&vcc->stats->rx_err);
33811+ atomic_inc_unchecked(&vcc->stats->rx_err);
33812 return;
33813 }
33814
33815@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33816 if (length > ATM_MAX_AAL5_PDU) {
33817 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33818 __func__, length, vcc);
33819- atomic_inc(&vcc->stats->rx_err);
33820+ atomic_inc_unchecked(&vcc->stats->rx_err);
33821 goto out;
33822 }
33823
33824@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33825 if (sarb->len < pdu_length) {
33826 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33827 __func__, pdu_length, sarb->len, vcc);
33828- atomic_inc(&vcc->stats->rx_err);
33829+ atomic_inc_unchecked(&vcc->stats->rx_err);
33830 goto out;
33831 }
33832
33833 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33834 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33835 __func__, vcc);
33836- atomic_inc(&vcc->stats->rx_err);
33837+ atomic_inc_unchecked(&vcc->stats->rx_err);
33838 goto out;
33839 }
33840
33841@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33842 if (printk_ratelimit())
33843 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33844 __func__, length);
33845- atomic_inc(&vcc->stats->rx_drop);
33846+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33847 goto out;
33848 }
33849
33850@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33851
33852 vcc->push(vcc, skb);
33853
33854- atomic_inc(&vcc->stats->rx);
33855+ atomic_inc_unchecked(&vcc->stats->rx);
33856 out:
33857 skb_trim(sarb, 0);
33858 }
33859@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33860 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33861
33862 usbatm_pop(vcc, skb);
33863- atomic_inc(&vcc->stats->tx);
33864+ atomic_inc_unchecked(&vcc->stats->tx);
33865
33866 skb = skb_dequeue(&instance->sndqueue);
33867 }
33868@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33869 if (!left--)
33870 return sprintf(page,
33871 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33872- atomic_read(&atm_dev->stats.aal5.tx),
33873- atomic_read(&atm_dev->stats.aal5.tx_err),
33874- atomic_read(&atm_dev->stats.aal5.rx),
33875- atomic_read(&atm_dev->stats.aal5.rx_err),
33876- atomic_read(&atm_dev->stats.aal5.rx_drop));
33877+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33878+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33879+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33880+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33881+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33882
33883 if (!left--) {
33884 if (instance->disconnected)
33885diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33886--- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33887+++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33888@@ -126,7 +126,7 @@ static const char format_endpt[] =
33889 * time it gets called.
33890 */
33891 static struct device_connect_event {
33892- atomic_t count;
33893+ atomic_unchecked_t count;
33894 wait_queue_head_t wait;
33895 } device_event = {
33896 .count = ATOMIC_INIT(1),
33897@@ -164,7 +164,7 @@ static const struct class_info clas_info
33898
33899 void usbfs_conn_disc_event(void)
33900 {
33901- atomic_add(2, &device_event.count);
33902+ atomic_add_unchecked(2, &device_event.count);
33903 wake_up(&device_event.wait);
33904 }
33905
33906@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33907
33908 poll_wait(file, &device_event.wait, wait);
33909
33910- event_count = atomic_read(&device_event.count);
33911+ event_count = atomic_read_unchecked(&device_event.count);
33912 if (file->f_version != event_count) {
33913 file->f_version = event_count;
33914 return POLLIN | POLLRDNORM;
33915diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33916--- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33917+++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33918@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33919 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33920 if (buf) {
33921 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33922- if (len > 0) {
33923- smallbuf = kmalloc(++len, GFP_NOIO);
33924+ if (len++ > 0) {
33925+ smallbuf = kmalloc(len, GFP_NOIO);
33926 if (!smallbuf)
33927 return buf;
33928 memcpy(smallbuf, buf, len);
33929diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33930--- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33931+++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33932@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33933
33934 #ifdef CONFIG_KGDB
33935 static struct kgdb_io kgdbdbgp_io_ops;
33936-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33937+static struct kgdb_io kgdbdbgp_io_ops_console;
33938+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33939 #else
33940 #define dbgp_kgdb_mode (0)
33941 #endif
33942@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33943 .write_char = kgdbdbgp_write_char,
33944 };
33945
33946+static struct kgdb_io kgdbdbgp_io_ops_console = {
33947+ .name = "kgdbdbgp",
33948+ .read_char = kgdbdbgp_read_char,
33949+ .write_char = kgdbdbgp_write_char,
33950+ .is_console = 1
33951+};
33952+
33953 static int kgdbdbgp_wait_time;
33954
33955 static int __init kgdbdbgp_parse_config(char *str)
33956@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33957 ptr++;
33958 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33959 }
33960- kgdb_register_io_module(&kgdbdbgp_io_ops);
33961- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33962+ if (early_dbgp_console.index != -1)
33963+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33964+ else
33965+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33966
33967 return 0;
33968 }
33969diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33970--- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33971+++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33972@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33973 unsigned int num_tests;
33974 int i, ret;
33975
33976+ pax_track_stack();
33977+
33978 num_tests = ARRAY_SIZE(simple_test_vector);
33979 for (i = 0; i < num_tests; i++) {
33980 ret = xhci_test_trb_in_td(xhci,
33981diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33982--- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33983+++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33984@@ -192,7 +192,7 @@ struct wahc {
33985 struct list_head xfer_delayed_list;
33986 spinlock_t xfer_list_lock;
33987 struct work_struct xfer_work;
33988- atomic_t xfer_id_count;
33989+ atomic_unchecked_t xfer_id_count;
33990 };
33991
33992
33993@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33994 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33995 spin_lock_init(&wa->xfer_list_lock);
33996 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33997- atomic_set(&wa->xfer_id_count, 1);
33998+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33999 }
34000
34001 /**
34002diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
34003--- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
34004+++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
34005@@ -294,7 +294,7 @@ out:
34006 */
34007 static void wa_xfer_id_init(struct wa_xfer *xfer)
34008 {
34009- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
34010+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
34011 }
34012
34013 /*
34014diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
34015--- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
34016+++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
34017@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
34018 return get_user(vq->last_used_idx, &used->idx);
34019 }
34020
34021-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
34022+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
34023 {
34024 struct file *eventfp, *filep = NULL,
34025 *pollstart = NULL, *pollstop = NULL;
34026diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
34027--- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
34028+++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
34029@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
34030 rc = -ENODEV;
34031 goto out;
34032 }
34033- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
34034- !info->fbops->fb_setcmap)) {
34035+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
34036 rc = -EINVAL;
34037 goto out1;
34038 }
34039diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
34040--- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
34041+++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
34042@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
34043 image->dx += image->width + 8;
34044 }
34045 } else if (rotate == FB_ROTATE_UD) {
34046- for (x = 0; x < num && image->dx >= 0; x++) {
34047+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
34048 info->fbops->fb_imageblit(info, image);
34049 image->dx -= image->width + 8;
34050 }
34051@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
34052 image->dy += image->height + 8;
34053 }
34054 } else if (rotate == FB_ROTATE_CCW) {
34055- for (x = 0; x < num && image->dy >= 0; x++) {
34056+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
34057 info->fbops->fb_imageblit(info, image);
34058 image->dy -= image->height + 8;
34059 }
34060@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
34061 int flags = info->flags;
34062 int ret = 0;
34063
34064+ pax_track_stack();
34065+
34066 if (var->activate & FB_ACTIVATE_INV_MODE) {
34067 struct fb_videomode mode1, mode2;
34068
34069@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
34070 void __user *argp = (void __user *)arg;
34071 long ret = 0;
34072
34073+ pax_track_stack();
34074+
34075 switch (cmd) {
34076 case FBIOGET_VSCREENINFO:
34077 if (!lock_fb_info(info))
34078@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
34079 return -EFAULT;
34080 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
34081 return -EINVAL;
34082- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
34083+ if (con2fb.framebuffer >= FB_MAX)
34084 return -EINVAL;
34085 if (!registered_fb[con2fb.framebuffer])
34086 request_module("fb%d", con2fb.framebuffer);
34087diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
34088--- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
34089+++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
34090@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
34091 }
34092 }
34093 printk("ringbuffer lockup!!!\n");
34094+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
34095 i810_report_error(mmio);
34096 par->dev_flags |= LOCKUP;
34097 info->pixmap.scan_align = 1;
34098diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
34099--- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
34100+++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
34101@@ -1,1604 +1,1123 @@
34102 P3
34103-# Standard 224-color Linux logo
34104 80 80
34105 255
34106- 0 0 0 0 0 0 0 0 0 0 0 0
34107- 0 0 0 0 0 0 0 0 0 0 0 0
34108- 0 0 0 0 0 0 0 0 0 0 0 0
34109- 0 0 0 0 0 0 0 0 0 0 0 0
34110- 0 0 0 0 0 0 0 0 0 0 0 0
34111- 0 0 0 0 0 0 0 0 0 0 0 0
34112- 0 0 0 0 0 0 0 0 0 0 0 0
34113- 0 0 0 0 0 0 0 0 0 0 0 0
34114- 0 0 0 0 0 0 0 0 0 0 0 0
34115- 6 6 6 6 6 6 10 10 10 10 10 10
34116- 10 10 10 6 6 6 6 6 6 6 6 6
34117- 0 0 0 0 0 0 0 0 0 0 0 0
34118- 0 0 0 0 0 0 0 0 0 0 0 0
34119- 0 0 0 0 0 0 0 0 0 0 0 0
34120- 0 0 0 0 0 0 0 0 0 0 0 0
34121- 0 0 0 0 0 0 0 0 0 0 0 0
34122- 0 0 0 0 0 0 0 0 0 0 0 0
34123- 0 0 0 0 0 0 0 0 0 0 0 0
34124- 0 0 0 0 0 0 0 0 0 0 0 0
34125- 0 0 0 0 0 0 0 0 0 0 0 0
34126- 0 0 0 0 0 0 0 0 0 0 0 0
34127- 0 0 0 0 0 0 0 0 0 0 0 0
34128- 0 0 0 0 0 0 0 0 0 0 0 0
34129- 0 0 0 0 0 0 0 0 0 0 0 0
34130- 0 0 0 0 0 0 0 0 0 0 0 0
34131- 0 0 0 0 0 0 0 0 0 0 0 0
34132- 0 0 0 0 0 0 0 0 0 0 0 0
34133- 0 0 0 0 0 0 0 0 0 0 0 0
34134- 0 0 0 6 6 6 10 10 10 14 14 14
34135- 22 22 22 26 26 26 30 30 30 34 34 34
34136- 30 30 30 30 30 30 26 26 26 18 18 18
34137- 14 14 14 10 10 10 6 6 6 0 0 0
34138- 0 0 0 0 0 0 0 0 0 0 0 0
34139- 0 0 0 0 0 0 0 0 0 0 0 0
34140- 0 0 0 0 0 0 0 0 0 0 0 0
34141- 0 0 0 0 0 0 0 0 0 0 0 0
34142- 0 0 0 0 0 0 0 0 0 0 0 0
34143- 0 0 0 0 0 0 0 0 0 0 0 0
34144- 0 0 0 0 0 0 0 0 0 0 0 0
34145- 0 0 0 0 0 0 0 0 0 0 0 0
34146- 0 0 0 0 0 0 0 0 0 0 0 0
34147- 0 0 0 0 0 1 0 0 1 0 0 0
34148- 0 0 0 0 0 0 0 0 0 0 0 0
34149- 0 0 0 0 0 0 0 0 0 0 0 0
34150- 0 0 0 0 0 0 0 0 0 0 0 0
34151- 0 0 0 0 0 0 0 0 0 0 0 0
34152- 0 0 0 0 0 0 0 0 0 0 0 0
34153- 0 0 0 0 0 0 0 0 0 0 0 0
34154- 6 6 6 14 14 14 26 26 26 42 42 42
34155- 54 54 54 66 66 66 78 78 78 78 78 78
34156- 78 78 78 74 74 74 66 66 66 54 54 54
34157- 42 42 42 26 26 26 18 18 18 10 10 10
34158- 6 6 6 0 0 0 0 0 0 0 0 0
34159- 0 0 0 0 0 0 0 0 0 0 0 0
34160- 0 0 0 0 0 0 0 0 0 0 0 0
34161- 0 0 0 0 0 0 0 0 0 0 0 0
34162- 0 0 0 0 0 0 0 0 0 0 0 0
34163- 0 0 0 0 0 0 0 0 0 0 0 0
34164- 0 0 0 0 0 0 0 0 0 0 0 0
34165- 0 0 0 0 0 0 0 0 0 0 0 0
34166- 0 0 0 0 0 0 0 0 0 0 0 0
34167- 0 0 1 0 0 0 0 0 0 0 0 0
34168- 0 0 0 0 0 0 0 0 0 0 0 0
34169- 0 0 0 0 0 0 0 0 0 0 0 0
34170- 0 0 0 0 0 0 0 0 0 0 0 0
34171- 0 0 0 0 0 0 0 0 0 0 0 0
34172- 0 0 0 0 0 0 0 0 0 0 0 0
34173- 0 0 0 0 0 0 0 0 0 10 10 10
34174- 22 22 22 42 42 42 66 66 66 86 86 86
34175- 66 66 66 38 38 38 38 38 38 22 22 22
34176- 26 26 26 34 34 34 54 54 54 66 66 66
34177- 86 86 86 70 70 70 46 46 46 26 26 26
34178- 14 14 14 6 6 6 0 0 0 0 0 0
34179- 0 0 0 0 0 0 0 0 0 0 0 0
34180- 0 0 0 0 0 0 0 0 0 0 0 0
34181- 0 0 0 0 0 0 0 0 0 0 0 0
34182- 0 0 0 0 0 0 0 0 0 0 0 0
34183- 0 0 0 0 0 0 0 0 0 0 0 0
34184- 0 0 0 0 0 0 0 0 0 0 0 0
34185- 0 0 0 0 0 0 0 0 0 0 0 0
34186- 0 0 0 0 0 0 0 0 0 0 0 0
34187- 0 0 1 0 0 1 0 0 1 0 0 0
34188- 0 0 0 0 0 0 0 0 0 0 0 0
34189- 0 0 0 0 0 0 0 0 0 0 0 0
34190- 0 0 0 0 0 0 0 0 0 0 0 0
34191- 0 0 0 0 0 0 0 0 0 0 0 0
34192- 0 0 0 0 0 0 0 0 0 0 0 0
34193- 0 0 0 0 0 0 10 10 10 26 26 26
34194- 50 50 50 82 82 82 58 58 58 6 6 6
34195- 2 2 6 2 2 6 2 2 6 2 2 6
34196- 2 2 6 2 2 6 2 2 6 2 2 6
34197- 6 6 6 54 54 54 86 86 86 66 66 66
34198- 38 38 38 18 18 18 6 6 6 0 0 0
34199- 0 0 0 0 0 0 0 0 0 0 0 0
34200- 0 0 0 0 0 0 0 0 0 0 0 0
34201- 0 0 0 0 0 0 0 0 0 0 0 0
34202- 0 0 0 0 0 0 0 0 0 0 0 0
34203- 0 0 0 0 0 0 0 0 0 0 0 0
34204- 0 0 0 0 0 0 0 0 0 0 0 0
34205- 0 0 0 0 0 0 0 0 0 0 0 0
34206- 0 0 0 0 0 0 0 0 0 0 0 0
34207- 0 0 0 0 0 0 0 0 0 0 0 0
34208- 0 0 0 0 0 0 0 0 0 0 0 0
34209- 0 0 0 0 0 0 0 0 0 0 0 0
34210- 0 0 0 0 0 0 0 0 0 0 0 0
34211- 0 0 0 0 0 0 0 0 0 0 0 0
34212- 0 0 0 0 0 0 0 0 0 0 0 0
34213- 0 0 0 6 6 6 22 22 22 50 50 50
34214- 78 78 78 34 34 34 2 2 6 2 2 6
34215- 2 2 6 2 2 6 2 2 6 2 2 6
34216- 2 2 6 2 2 6 2 2 6 2 2 6
34217- 2 2 6 2 2 6 6 6 6 70 70 70
34218- 78 78 78 46 46 46 22 22 22 6 6 6
34219- 0 0 0 0 0 0 0 0 0 0 0 0
34220- 0 0 0 0 0 0 0 0 0 0 0 0
34221- 0 0 0 0 0 0 0 0 0 0 0 0
34222- 0 0 0 0 0 0 0 0 0 0 0 0
34223- 0 0 0 0 0 0 0 0 0 0 0 0
34224- 0 0 0 0 0 0 0 0 0 0 0 0
34225- 0 0 0 0 0 0 0 0 0 0 0 0
34226- 0 0 0 0 0 0 0 0 0 0 0 0
34227- 0 0 1 0 0 1 0 0 1 0 0 0
34228- 0 0 0 0 0 0 0 0 0 0 0 0
34229- 0 0 0 0 0 0 0 0 0 0 0 0
34230- 0 0 0 0 0 0 0 0 0 0 0 0
34231- 0 0 0 0 0 0 0 0 0 0 0 0
34232- 0 0 0 0 0 0 0 0 0 0 0 0
34233- 6 6 6 18 18 18 42 42 42 82 82 82
34234- 26 26 26 2 2 6 2 2 6 2 2 6
34235- 2 2 6 2 2 6 2 2 6 2 2 6
34236- 2 2 6 2 2 6 2 2 6 14 14 14
34237- 46 46 46 34 34 34 6 6 6 2 2 6
34238- 42 42 42 78 78 78 42 42 42 18 18 18
34239- 6 6 6 0 0 0 0 0 0 0 0 0
34240- 0 0 0 0 0 0 0 0 0 0 0 0
34241- 0 0 0 0 0 0 0 0 0 0 0 0
34242- 0 0 0 0 0 0 0 0 0 0 0 0
34243- 0 0 0 0 0 0 0 0 0 0 0 0
34244- 0 0 0 0 0 0 0 0 0 0 0 0
34245- 0 0 0 0 0 0 0 0 0 0 0 0
34246- 0 0 0 0 0 0 0 0 0 0 0 0
34247- 0 0 1 0 0 0 0 0 1 0 0 0
34248- 0 0 0 0 0 0 0 0 0 0 0 0
34249- 0 0 0 0 0 0 0 0 0 0 0 0
34250- 0 0 0 0 0 0 0 0 0 0 0 0
34251- 0 0 0 0 0 0 0 0 0 0 0 0
34252- 0 0 0 0 0 0 0 0 0 0 0 0
34253- 10 10 10 30 30 30 66 66 66 58 58 58
34254- 2 2 6 2 2 6 2 2 6 2 2 6
34255- 2 2 6 2 2 6 2 2 6 2 2 6
34256- 2 2 6 2 2 6 2 2 6 26 26 26
34257- 86 86 86 101 101 101 46 46 46 10 10 10
34258- 2 2 6 58 58 58 70 70 70 34 34 34
34259- 10 10 10 0 0 0 0 0 0 0 0 0
34260- 0 0 0 0 0 0 0 0 0 0 0 0
34261- 0 0 0 0 0 0 0 0 0 0 0 0
34262- 0 0 0 0 0 0 0 0 0 0 0 0
34263- 0 0 0 0 0 0 0 0 0 0 0 0
34264- 0 0 0 0 0 0 0 0 0 0 0 0
34265- 0 0 0 0 0 0 0 0 0 0 0 0
34266- 0 0 0 0 0 0 0 0 0 0 0 0
34267- 0 0 1 0 0 1 0 0 1 0 0 0
34268- 0 0 0 0 0 0 0 0 0 0 0 0
34269- 0 0 0 0 0 0 0 0 0 0 0 0
34270- 0 0 0 0 0 0 0 0 0 0 0 0
34271- 0 0 0 0 0 0 0 0 0 0 0 0
34272- 0 0 0 0 0 0 0 0 0 0 0 0
34273- 14 14 14 42 42 42 86 86 86 10 10 10
34274- 2 2 6 2 2 6 2 2 6 2 2 6
34275- 2 2 6 2 2 6 2 2 6 2 2 6
34276- 2 2 6 2 2 6 2 2 6 30 30 30
34277- 94 94 94 94 94 94 58 58 58 26 26 26
34278- 2 2 6 6 6 6 78 78 78 54 54 54
34279- 22 22 22 6 6 6 0 0 0 0 0 0
34280- 0 0 0 0 0 0 0 0 0 0 0 0
34281- 0 0 0 0 0 0 0 0 0 0 0 0
34282- 0 0 0 0 0 0 0 0 0 0 0 0
34283- 0 0 0 0 0 0 0 0 0 0 0 0
34284- 0 0 0 0 0 0 0 0 0 0 0 0
34285- 0 0 0 0 0 0 0 0 0 0 0 0
34286- 0 0 0 0 0 0 0 0 0 0 0 0
34287- 0 0 0 0 0 0 0 0 0 0 0 0
34288- 0 0 0 0 0 0 0 0 0 0 0 0
34289- 0 0 0 0 0 0 0 0 0 0 0 0
34290- 0 0 0 0 0 0 0 0 0 0 0 0
34291- 0 0 0 0 0 0 0 0 0 0 0 0
34292- 0 0 0 0 0 0 0 0 0 6 6 6
34293- 22 22 22 62 62 62 62 62 62 2 2 6
34294- 2 2 6 2 2 6 2 2 6 2 2 6
34295- 2 2 6 2 2 6 2 2 6 2 2 6
34296- 2 2 6 2 2 6 2 2 6 26 26 26
34297- 54 54 54 38 38 38 18 18 18 10 10 10
34298- 2 2 6 2 2 6 34 34 34 82 82 82
34299- 38 38 38 14 14 14 0 0 0 0 0 0
34300- 0 0 0 0 0 0 0 0 0 0 0 0
34301- 0 0 0 0 0 0 0 0 0 0 0 0
34302- 0 0 0 0 0 0 0 0 0 0 0 0
34303- 0 0 0 0 0 0 0 0 0 0 0 0
34304- 0 0 0 0 0 0 0 0 0 0 0 0
34305- 0 0 0 0 0 0 0 0 0 0 0 0
34306- 0 0 0 0 0 0 0 0 0 0 0 0
34307- 0 0 0 0 0 1 0 0 1 0 0 0
34308- 0 0 0 0 0 0 0 0 0 0 0 0
34309- 0 0 0 0 0 0 0 0 0 0 0 0
34310- 0 0 0 0 0 0 0 0 0 0 0 0
34311- 0 0 0 0 0 0 0 0 0 0 0 0
34312- 0 0 0 0 0 0 0 0 0 6 6 6
34313- 30 30 30 78 78 78 30 30 30 2 2 6
34314- 2 2 6 2 2 6 2 2 6 2 2 6
34315- 2 2 6 2 2 6 2 2 6 2 2 6
34316- 2 2 6 2 2 6 2 2 6 10 10 10
34317- 10 10 10 2 2 6 2 2 6 2 2 6
34318- 2 2 6 2 2 6 2 2 6 78 78 78
34319- 50 50 50 18 18 18 6 6 6 0 0 0
34320- 0 0 0 0 0 0 0 0 0 0 0 0
34321- 0 0 0 0 0 0 0 0 0 0 0 0
34322- 0 0 0 0 0 0 0 0 0 0 0 0
34323- 0 0 0 0 0 0 0 0 0 0 0 0
34324- 0 0 0 0 0 0 0 0 0 0 0 0
34325- 0 0 0 0 0 0 0 0 0 0 0 0
34326- 0 0 0 0 0 0 0 0 0 0 0 0
34327- 0 0 1 0 0 0 0 0 0 0 0 0
34328- 0 0 0 0 0 0 0 0 0 0 0 0
34329- 0 0 0 0 0 0 0 0 0 0 0 0
34330- 0 0 0 0 0 0 0 0 0 0 0 0
34331- 0 0 0 0 0 0 0 0 0 0 0 0
34332- 0 0 0 0 0 0 0 0 0 10 10 10
34333- 38 38 38 86 86 86 14 14 14 2 2 6
34334- 2 2 6 2 2 6 2 2 6 2 2 6
34335- 2 2 6 2 2 6 2 2 6 2 2 6
34336- 2 2 6 2 2 6 2 2 6 2 2 6
34337- 2 2 6 2 2 6 2 2 6 2 2 6
34338- 2 2 6 2 2 6 2 2 6 54 54 54
34339- 66 66 66 26 26 26 6 6 6 0 0 0
34340- 0 0 0 0 0 0 0 0 0 0 0 0
34341- 0 0 0 0 0 0 0 0 0 0 0 0
34342- 0 0 0 0 0 0 0 0 0 0 0 0
34343- 0 0 0 0 0 0 0 0 0 0 0 0
34344- 0 0 0 0 0 0 0 0 0 0 0 0
34345- 0 0 0 0 0 0 0 0 0 0 0 0
34346- 0 0 0 0 0 0 0 0 0 0 0 0
34347- 0 0 0 0 0 1 0 0 1 0 0 0
34348- 0 0 0 0 0 0 0 0 0 0 0 0
34349- 0 0 0 0 0 0 0 0 0 0 0 0
34350- 0 0 0 0 0 0 0 0 0 0 0 0
34351- 0 0 0 0 0 0 0 0 0 0 0 0
34352- 0 0 0 0 0 0 0 0 0 14 14 14
34353- 42 42 42 82 82 82 2 2 6 2 2 6
34354- 2 2 6 6 6 6 10 10 10 2 2 6
34355- 2 2 6 2 2 6 2 2 6 2 2 6
34356- 2 2 6 2 2 6 2 2 6 6 6 6
34357- 14 14 14 10 10 10 2 2 6 2 2 6
34358- 2 2 6 2 2 6 2 2 6 18 18 18
34359- 82 82 82 34 34 34 10 10 10 0 0 0
34360- 0 0 0 0 0 0 0 0 0 0 0 0
34361- 0 0 0 0 0 0 0 0 0 0 0 0
34362- 0 0 0 0 0 0 0 0 0 0 0 0
34363- 0 0 0 0 0 0 0 0 0 0 0 0
34364- 0 0 0 0 0 0 0 0 0 0 0 0
34365- 0 0 0 0 0 0 0 0 0 0 0 0
34366- 0 0 0 0 0 0 0 0 0 0 0 0
34367- 0 0 1 0 0 0 0 0 0 0 0 0
34368- 0 0 0 0 0 0 0 0 0 0 0 0
34369- 0 0 0 0 0 0 0 0 0 0 0 0
34370- 0 0 0 0 0 0 0 0 0 0 0 0
34371- 0 0 0 0 0 0 0 0 0 0 0 0
34372- 0 0 0 0 0 0 0 0 0 14 14 14
34373- 46 46 46 86 86 86 2 2 6 2 2 6
34374- 6 6 6 6 6 6 22 22 22 34 34 34
34375- 6 6 6 2 2 6 2 2 6 2 2 6
34376- 2 2 6 2 2 6 18 18 18 34 34 34
34377- 10 10 10 50 50 50 22 22 22 2 2 6
34378- 2 2 6 2 2 6 2 2 6 10 10 10
34379- 86 86 86 42 42 42 14 14 14 0 0 0
34380- 0 0 0 0 0 0 0 0 0 0 0 0
34381- 0 0 0 0 0 0 0 0 0 0 0 0
34382- 0 0 0 0 0 0 0 0 0 0 0 0
34383- 0 0 0 0 0 0 0 0 0 0 0 0
34384- 0 0 0 0 0 0 0 0 0 0 0 0
34385- 0 0 0 0 0 0 0 0 0 0 0 0
34386- 0 0 0 0 0 0 0 0 0 0 0 0
34387- 0 0 1 0 0 1 0 0 1 0 0 0
34388- 0 0 0 0 0 0 0 0 0 0 0 0
34389- 0 0 0 0 0 0 0 0 0 0 0 0
34390- 0 0 0 0 0 0 0 0 0 0 0 0
34391- 0 0 0 0 0 0 0 0 0 0 0 0
34392- 0 0 0 0 0 0 0 0 0 14 14 14
34393- 46 46 46 86 86 86 2 2 6 2 2 6
34394- 38 38 38 116 116 116 94 94 94 22 22 22
34395- 22 22 22 2 2 6 2 2 6 2 2 6
34396- 14 14 14 86 86 86 138 138 138 162 162 162
34397-154 154 154 38 38 38 26 26 26 6 6 6
34398- 2 2 6 2 2 6 2 2 6 2 2 6
34399- 86 86 86 46 46 46 14 14 14 0 0 0
34400- 0 0 0 0 0 0 0 0 0 0 0 0
34401- 0 0 0 0 0 0 0 0 0 0 0 0
34402- 0 0 0 0 0 0 0 0 0 0 0 0
34403- 0 0 0 0 0 0 0 0 0 0 0 0
34404- 0 0 0 0 0 0 0 0 0 0 0 0
34405- 0 0 0 0 0 0 0 0 0 0 0 0
34406- 0 0 0 0 0 0 0 0 0 0 0 0
34407- 0 0 0 0 0 0 0 0 0 0 0 0
34408- 0 0 0 0 0 0 0 0 0 0 0 0
34409- 0 0 0 0 0 0 0 0 0 0 0 0
34410- 0 0 0 0 0 0 0 0 0 0 0 0
34411- 0 0 0 0 0 0 0 0 0 0 0 0
34412- 0 0 0 0 0 0 0 0 0 14 14 14
34413- 46 46 46 86 86 86 2 2 6 14 14 14
34414-134 134 134 198 198 198 195 195 195 116 116 116
34415- 10 10 10 2 2 6 2 2 6 6 6 6
34416-101 98 89 187 187 187 210 210 210 218 218 218
34417-214 214 214 134 134 134 14 14 14 6 6 6
34418- 2 2 6 2 2 6 2 2 6 2 2 6
34419- 86 86 86 50 50 50 18 18 18 6 6 6
34420- 0 0 0 0 0 0 0 0 0 0 0 0
34421- 0 0 0 0 0 0 0 0 0 0 0 0
34422- 0 0 0 0 0 0 0 0 0 0 0 0
34423- 0 0 0 0 0 0 0 0 0 0 0 0
34424- 0 0 0 0 0 0 0 0 0 0 0 0
34425- 0 0 0 0 0 0 0 0 0 0 0 0
34426- 0 0 0 0 0 0 0 0 1 0 0 0
34427- 0 0 1 0 0 1 0 0 1 0 0 0
34428- 0 0 0 0 0 0 0 0 0 0 0 0
34429- 0 0 0 0 0 0 0 0 0 0 0 0
34430- 0 0 0 0 0 0 0 0 0 0 0 0
34431- 0 0 0 0 0 0 0 0 0 0 0 0
34432- 0 0 0 0 0 0 0 0 0 14 14 14
34433- 46 46 46 86 86 86 2 2 6 54 54 54
34434-218 218 218 195 195 195 226 226 226 246 246 246
34435- 58 58 58 2 2 6 2 2 6 30 30 30
34436-210 210 210 253 253 253 174 174 174 123 123 123
34437-221 221 221 234 234 234 74 74 74 2 2 6
34438- 2 2 6 2 2 6 2 2 6 2 2 6
34439- 70 70 70 58 58 58 22 22 22 6 6 6
34440- 0 0 0 0 0 0 0 0 0 0 0 0
34441- 0 0 0 0 0 0 0 0 0 0 0 0
34442- 0 0 0 0 0 0 0 0 0 0 0 0
34443- 0 0 0 0 0 0 0 0 0 0 0 0
34444- 0 0 0 0 0 0 0 0 0 0 0 0
34445- 0 0 0 0 0 0 0 0 0 0 0 0
34446- 0 0 0 0 0 0 0 0 0 0 0 0
34447- 0 0 0 0 0 0 0 0 0 0 0 0
34448- 0 0 0 0 0 0 0 0 0 0 0 0
34449- 0 0 0 0 0 0 0 0 0 0 0 0
34450- 0 0 0 0 0 0 0 0 0 0 0 0
34451- 0 0 0 0 0 0 0 0 0 0 0 0
34452- 0 0 0 0 0 0 0 0 0 14 14 14
34453- 46 46 46 82 82 82 2 2 6 106 106 106
34454-170 170 170 26 26 26 86 86 86 226 226 226
34455-123 123 123 10 10 10 14 14 14 46 46 46
34456-231 231 231 190 190 190 6 6 6 70 70 70
34457- 90 90 90 238 238 238 158 158 158 2 2 6
34458- 2 2 6 2 2 6 2 2 6 2 2 6
34459- 70 70 70 58 58 58 22 22 22 6 6 6
34460- 0 0 0 0 0 0 0 0 0 0 0 0
34461- 0 0 0 0 0 0 0 0 0 0 0 0
34462- 0 0 0 0 0 0 0 0 0 0 0 0
34463- 0 0 0 0 0 0 0 0 0 0 0 0
34464- 0 0 0 0 0 0 0 0 0 0 0 0
34465- 0 0 0 0 0 0 0 0 0 0 0 0
34466- 0 0 0 0 0 0 0 0 1 0 0 0
34467- 0 0 1 0 0 1 0 0 1 0 0 0
34468- 0 0 0 0 0 0 0 0 0 0 0 0
34469- 0 0 0 0 0 0 0 0 0 0 0 0
34470- 0 0 0 0 0 0 0 0 0 0 0 0
34471- 0 0 0 0 0 0 0 0 0 0 0 0
34472- 0 0 0 0 0 0 0 0 0 14 14 14
34473- 42 42 42 86 86 86 6 6 6 116 116 116
34474-106 106 106 6 6 6 70 70 70 149 149 149
34475-128 128 128 18 18 18 38 38 38 54 54 54
34476-221 221 221 106 106 106 2 2 6 14 14 14
34477- 46 46 46 190 190 190 198 198 198 2 2 6
34478- 2 2 6 2 2 6 2 2 6 2 2 6
34479- 74 74 74 62 62 62 22 22 22 6 6 6
34480- 0 0 0 0 0 0 0 0 0 0 0 0
34481- 0 0 0 0 0 0 0 0 0 0 0 0
34482- 0 0 0 0 0 0 0 0 0 0 0 0
34483- 0 0 0 0 0 0 0 0 0 0 0 0
34484- 0 0 0 0 0 0 0 0 0 0 0 0
34485- 0 0 0 0 0 0 0 0 0 0 0 0
34486- 0 0 0 0 0 0 0 0 1 0 0 0
34487- 0 0 1 0 0 0 0 0 1 0 0 0
34488- 0 0 0 0 0 0 0 0 0 0 0 0
34489- 0 0 0 0 0 0 0 0 0 0 0 0
34490- 0 0 0 0 0 0 0 0 0 0 0 0
34491- 0 0 0 0 0 0 0 0 0 0 0 0
34492- 0 0 0 0 0 0 0 0 0 14 14 14
34493- 42 42 42 94 94 94 14 14 14 101 101 101
34494-128 128 128 2 2 6 18 18 18 116 116 116
34495-118 98 46 121 92 8 121 92 8 98 78 10
34496-162 162 162 106 106 106 2 2 6 2 2 6
34497- 2 2 6 195 195 195 195 195 195 6 6 6
34498- 2 2 6 2 2 6 2 2 6 2 2 6
34499- 74 74 74 62 62 62 22 22 22 6 6 6
34500- 0 0 0 0 0 0 0 0 0 0 0 0
34501- 0 0 0 0 0 0 0 0 0 0 0 0
34502- 0 0 0 0 0 0 0 0 0 0 0 0
34503- 0 0 0 0 0 0 0 0 0 0 0 0
34504- 0 0 0 0 0 0 0 0 0 0 0 0
34505- 0 0 0 0 0 0 0 0 0 0 0 0
34506- 0 0 0 0 0 0 0 0 1 0 0 1
34507- 0 0 1 0 0 0 0 0 1 0 0 0
34508- 0 0 0 0 0 0 0 0 0 0 0 0
34509- 0 0 0 0 0 0 0 0 0 0 0 0
34510- 0 0 0 0 0 0 0 0 0 0 0 0
34511- 0 0 0 0 0 0 0 0 0 0 0 0
34512- 0 0 0 0 0 0 0 0 0 10 10 10
34513- 38 38 38 90 90 90 14 14 14 58 58 58
34514-210 210 210 26 26 26 54 38 6 154 114 10
34515-226 170 11 236 186 11 225 175 15 184 144 12
34516-215 174 15 175 146 61 37 26 9 2 2 6
34517- 70 70 70 246 246 246 138 138 138 2 2 6
34518- 2 2 6 2 2 6 2 2 6 2 2 6
34519- 70 70 70 66 66 66 26 26 26 6 6 6
34520- 0 0 0 0 0 0 0 0 0 0 0 0
34521- 0 0 0 0 0 0 0 0 0 0 0 0
34522- 0 0 0 0 0 0 0 0 0 0 0 0
34523- 0 0 0 0 0 0 0 0 0 0 0 0
34524- 0 0 0 0 0 0 0 0 0 0 0 0
34525- 0 0 0 0 0 0 0 0 0 0 0 0
34526- 0 0 0 0 0 0 0 0 0 0 0 0
34527- 0 0 0 0 0 0 0 0 0 0 0 0
34528- 0 0 0 0 0 0 0 0 0 0 0 0
34529- 0 0 0 0 0 0 0 0 0 0 0 0
34530- 0 0 0 0 0 0 0 0 0 0 0 0
34531- 0 0 0 0 0 0 0 0 0 0 0 0
34532- 0 0 0 0 0 0 0 0 0 10 10 10
34533- 38 38 38 86 86 86 14 14 14 10 10 10
34534-195 195 195 188 164 115 192 133 9 225 175 15
34535-239 182 13 234 190 10 232 195 16 232 200 30
34536-245 207 45 241 208 19 232 195 16 184 144 12
34537-218 194 134 211 206 186 42 42 42 2 2 6
34538- 2 2 6 2 2 6 2 2 6 2 2 6
34539- 50 50 50 74 74 74 30 30 30 6 6 6
34540- 0 0 0 0 0 0 0 0 0 0 0 0
34541- 0 0 0 0 0 0 0 0 0 0 0 0
34542- 0 0 0 0 0 0 0 0 0 0 0 0
34543- 0 0 0 0 0 0 0 0 0 0 0 0
34544- 0 0 0 0 0 0 0 0 0 0 0 0
34545- 0 0 0 0 0 0 0 0 0 0 0 0
34546- 0 0 0 0 0 0 0 0 0 0 0 0
34547- 0 0 0 0 0 0 0 0 0 0 0 0
34548- 0 0 0 0 0 0 0 0 0 0 0 0
34549- 0 0 0 0 0 0 0 0 0 0 0 0
34550- 0 0 0 0 0 0 0 0 0 0 0 0
34551- 0 0 0 0 0 0 0 0 0 0 0 0
34552- 0 0 0 0 0 0 0 0 0 10 10 10
34553- 34 34 34 86 86 86 14 14 14 2 2 6
34554-121 87 25 192 133 9 219 162 10 239 182 13
34555-236 186 11 232 195 16 241 208 19 244 214 54
34556-246 218 60 246 218 38 246 215 20 241 208 19
34557-241 208 19 226 184 13 121 87 25 2 2 6
34558- 2 2 6 2 2 6 2 2 6 2 2 6
34559- 50 50 50 82 82 82 34 34 34 10 10 10
34560- 0 0 0 0 0 0 0 0 0 0 0 0
34561- 0 0 0 0 0 0 0 0 0 0 0 0
34562- 0 0 0 0 0 0 0 0 0 0 0 0
34563- 0 0 0 0 0 0 0 0 0 0 0 0
34564- 0 0 0 0 0 0 0 0 0 0 0 0
34565- 0 0 0 0 0 0 0 0 0 0 0 0
34566- 0 0 0 0 0 0 0 0 0 0 0 0
34567- 0 0 0 0 0 0 0 0 0 0 0 0
34568- 0 0 0 0 0 0 0 0 0 0 0 0
34569- 0 0 0 0 0 0 0 0 0 0 0 0
34570- 0 0 0 0 0 0 0 0 0 0 0 0
34571- 0 0 0 0 0 0 0 0 0 0 0 0
34572- 0 0 0 0 0 0 0 0 0 10 10 10
34573- 34 34 34 82 82 82 30 30 30 61 42 6
34574-180 123 7 206 145 10 230 174 11 239 182 13
34575-234 190 10 238 202 15 241 208 19 246 218 74
34576-246 218 38 246 215 20 246 215 20 246 215 20
34577-226 184 13 215 174 15 184 144 12 6 6 6
34578- 2 2 6 2 2 6 2 2 6 2 2 6
34579- 26 26 26 94 94 94 42 42 42 14 14 14
34580- 0 0 0 0 0 0 0 0 0 0 0 0
34581- 0 0 0 0 0 0 0 0 0 0 0 0
34582- 0 0 0 0 0 0 0 0 0 0 0 0
34583- 0 0 0 0 0 0 0 0 0 0 0 0
34584- 0 0 0 0 0 0 0 0 0 0 0 0
34585- 0 0 0 0 0 0 0 0 0 0 0 0
34586- 0 0 0 0 0 0 0 0 0 0 0 0
34587- 0 0 0 0 0 0 0 0 0 0 0 0
34588- 0 0 0 0 0 0 0 0 0 0 0 0
34589- 0 0 0 0 0 0 0 0 0 0 0 0
34590- 0 0 0 0 0 0 0 0 0 0 0 0
34591- 0 0 0 0 0 0 0 0 0 0 0 0
34592- 0 0 0 0 0 0 0 0 0 10 10 10
34593- 30 30 30 78 78 78 50 50 50 104 69 6
34594-192 133 9 216 158 10 236 178 12 236 186 11
34595-232 195 16 241 208 19 244 214 54 245 215 43
34596-246 215 20 246 215 20 241 208 19 198 155 10
34597-200 144 11 216 158 10 156 118 10 2 2 6
34598- 2 2 6 2 2 6 2 2 6 2 2 6
34599- 6 6 6 90 90 90 54 54 54 18 18 18
34600- 6 6 6 0 0 0 0 0 0 0 0 0
34601- 0 0 0 0 0 0 0 0 0 0 0 0
34602- 0 0 0 0 0 0 0 0 0 0 0 0
34603- 0 0 0 0 0 0 0 0 0 0 0 0
34604- 0 0 0 0 0 0 0 0 0 0 0 0
34605- 0 0 0 0 0 0 0 0 0 0 0 0
34606- 0 0 0 0 0 0 0 0 0 0 0 0
34607- 0 0 0 0 0 0 0 0 0 0 0 0
34608- 0 0 0 0 0 0 0 0 0 0 0 0
34609- 0 0 0 0 0 0 0 0 0 0 0 0
34610- 0 0 0 0 0 0 0 0 0 0 0 0
34611- 0 0 0 0 0 0 0 0 0 0 0 0
34612- 0 0 0 0 0 0 0 0 0 10 10 10
34613- 30 30 30 78 78 78 46 46 46 22 22 22
34614-137 92 6 210 162 10 239 182 13 238 190 10
34615-238 202 15 241 208 19 246 215 20 246 215 20
34616-241 208 19 203 166 17 185 133 11 210 150 10
34617-216 158 10 210 150 10 102 78 10 2 2 6
34618- 6 6 6 54 54 54 14 14 14 2 2 6
34619- 2 2 6 62 62 62 74 74 74 30 30 30
34620- 10 10 10 0 0 0 0 0 0 0 0 0
34621- 0 0 0 0 0 0 0 0 0 0 0 0
34622- 0 0 0 0 0 0 0 0 0 0 0 0
34623- 0 0 0 0 0 0 0 0 0 0 0 0
34624- 0 0 0 0 0 0 0 0 0 0 0 0
34625- 0 0 0 0 0 0 0 0 0 0 0 0
34626- 0 0 0 0 0 0 0 0 0 0 0 0
34627- 0 0 0 0 0 0 0 0 0 0 0 0
34628- 0 0 0 0 0 0 0 0 0 0 0 0
34629- 0 0 0 0 0 0 0 0 0 0 0 0
34630- 0 0 0 0 0 0 0 0 0 0 0 0
34631- 0 0 0 0 0 0 0 0 0 0 0 0
34632- 0 0 0 0 0 0 0 0 0 10 10 10
34633- 34 34 34 78 78 78 50 50 50 6 6 6
34634- 94 70 30 139 102 15 190 146 13 226 184 13
34635-232 200 30 232 195 16 215 174 15 190 146 13
34636-168 122 10 192 133 9 210 150 10 213 154 11
34637-202 150 34 182 157 106 101 98 89 2 2 6
34638- 2 2 6 78 78 78 116 116 116 58 58 58
34639- 2 2 6 22 22 22 90 90 90 46 46 46
34640- 18 18 18 6 6 6 0 0 0 0 0 0
34641- 0 0 0 0 0 0 0 0 0 0 0 0
34642- 0 0 0 0 0 0 0 0 0 0 0 0
34643- 0 0 0 0 0 0 0 0 0 0 0 0
34644- 0 0 0 0 0 0 0 0 0 0 0 0
34645- 0 0 0 0 0 0 0 0 0 0 0 0
34646- 0 0 0 0 0 0 0 0 0 0 0 0
34647- 0 0 0 0 0 0 0 0 0 0 0 0
34648- 0 0 0 0 0 0 0 0 0 0 0 0
34649- 0 0 0 0 0 0 0 0 0 0 0 0
34650- 0 0 0 0 0 0 0 0 0 0 0 0
34651- 0 0 0 0 0 0 0 0 0 0 0 0
34652- 0 0 0 0 0 0 0 0 0 10 10 10
34653- 38 38 38 86 86 86 50 50 50 6 6 6
34654-128 128 128 174 154 114 156 107 11 168 122 10
34655-198 155 10 184 144 12 197 138 11 200 144 11
34656-206 145 10 206 145 10 197 138 11 188 164 115
34657-195 195 195 198 198 198 174 174 174 14 14 14
34658- 2 2 6 22 22 22 116 116 116 116 116 116
34659- 22 22 22 2 2 6 74 74 74 70 70 70
34660- 30 30 30 10 10 10 0 0 0 0 0 0
34661- 0 0 0 0 0 0 0 0 0 0 0 0
34662- 0 0 0 0 0 0 0 0 0 0 0 0
34663- 0 0 0 0 0 0 0 0 0 0 0 0
34664- 0 0 0 0 0 0 0 0 0 0 0 0
34665- 0 0 0 0 0 0 0 0 0 0 0 0
34666- 0 0 0 0 0 0 0 0 0 0 0 0
34667- 0 0 0 0 0 0 0 0 0 0 0 0
34668- 0 0 0 0 0 0 0 0 0 0 0 0
34669- 0 0 0 0 0 0 0 0 0 0 0 0
34670- 0 0 0 0 0 0 0 0 0 0 0 0
34671- 0 0 0 0 0 0 0 0 0 0 0 0
34672- 0 0 0 0 0 0 6 6 6 18 18 18
34673- 50 50 50 101 101 101 26 26 26 10 10 10
34674-138 138 138 190 190 190 174 154 114 156 107 11
34675-197 138 11 200 144 11 197 138 11 192 133 9
34676-180 123 7 190 142 34 190 178 144 187 187 187
34677-202 202 202 221 221 221 214 214 214 66 66 66
34678- 2 2 6 2 2 6 50 50 50 62 62 62
34679- 6 6 6 2 2 6 10 10 10 90 90 90
34680- 50 50 50 18 18 18 6 6 6 0 0 0
34681- 0 0 0 0 0 0 0 0 0 0 0 0
34682- 0 0 0 0 0 0 0 0 0 0 0 0
34683- 0 0 0 0 0 0 0 0 0 0 0 0
34684- 0 0 0 0 0 0 0 0 0 0 0 0
34685- 0 0 0 0 0 0 0 0 0 0 0 0
34686- 0 0 0 0 0 0 0 0 0 0 0 0
34687- 0 0 0 0 0 0 0 0 0 0 0 0
34688- 0 0 0 0 0 0 0 0 0 0 0 0
34689- 0 0 0 0 0 0 0 0 0 0 0 0
34690- 0 0 0 0 0 0 0 0 0 0 0 0
34691- 0 0 0 0 0 0 0 0 0 0 0 0
34692- 0 0 0 0 0 0 10 10 10 34 34 34
34693- 74 74 74 74 74 74 2 2 6 6 6 6
34694-144 144 144 198 198 198 190 190 190 178 166 146
34695-154 121 60 156 107 11 156 107 11 168 124 44
34696-174 154 114 187 187 187 190 190 190 210 210 210
34697-246 246 246 253 253 253 253 253 253 182 182 182
34698- 6 6 6 2 2 6 2 2 6 2 2 6
34699- 2 2 6 2 2 6 2 2 6 62 62 62
34700- 74 74 74 34 34 34 14 14 14 0 0 0
34701- 0 0 0 0 0 0 0 0 0 0 0 0
34702- 0 0 0 0 0 0 0 0 0 0 0 0
34703- 0 0 0 0 0 0 0 0 0 0 0 0
34704- 0 0 0 0 0 0 0 0 0 0 0 0
34705- 0 0 0 0 0 0 0 0 0 0 0 0
34706- 0 0 0 0 0 0 0 0 0 0 0 0
34707- 0 0 0 0 0 0 0 0 0 0 0 0
34708- 0 0 0 0 0 0 0 0 0 0 0 0
34709- 0 0 0 0 0 0 0 0 0 0 0 0
34710- 0 0 0 0 0 0 0 0 0 0 0 0
34711- 0 0 0 0 0 0 0 0 0 0 0 0
34712- 0 0 0 10 10 10 22 22 22 54 54 54
34713- 94 94 94 18 18 18 2 2 6 46 46 46
34714-234 234 234 221 221 221 190 190 190 190 190 190
34715-190 190 190 187 187 187 187 187 187 190 190 190
34716-190 190 190 195 195 195 214 214 214 242 242 242
34717-253 253 253 253 253 253 253 253 253 253 253 253
34718- 82 82 82 2 2 6 2 2 6 2 2 6
34719- 2 2 6 2 2 6 2 2 6 14 14 14
34720- 86 86 86 54 54 54 22 22 22 6 6 6
34721- 0 0 0 0 0 0 0 0 0 0 0 0
34722- 0 0 0 0 0 0 0 0 0 0 0 0
34723- 0 0 0 0 0 0 0 0 0 0 0 0
34724- 0 0 0 0 0 0 0 0 0 0 0 0
34725- 0 0 0 0 0 0 0 0 0 0 0 0
34726- 0 0 0 0 0 0 0 0 0 0 0 0
34727- 0 0 0 0 0 0 0 0 0 0 0 0
34728- 0 0 0 0 0 0 0 0 0 0 0 0
34729- 0 0 0 0 0 0 0 0 0 0 0 0
34730- 0 0 0 0 0 0 0 0 0 0 0 0
34731- 0 0 0 0 0 0 0 0 0 0 0 0
34732- 6 6 6 18 18 18 46 46 46 90 90 90
34733- 46 46 46 18 18 18 6 6 6 182 182 182
34734-253 253 253 246 246 246 206 206 206 190 190 190
34735-190 190 190 190 190 190 190 190 190 190 190 190
34736-206 206 206 231 231 231 250 250 250 253 253 253
34737-253 253 253 253 253 253 253 253 253 253 253 253
34738-202 202 202 14 14 14 2 2 6 2 2 6
34739- 2 2 6 2 2 6 2 2 6 2 2 6
34740- 42 42 42 86 86 86 42 42 42 18 18 18
34741- 6 6 6 0 0 0 0 0 0 0 0 0
34742- 0 0 0 0 0 0 0 0 0 0 0 0
34743- 0 0 0 0 0 0 0 0 0 0 0 0
34744- 0 0 0 0 0 0 0 0 0 0 0 0
34745- 0 0 0 0 0 0 0 0 0 0 0 0
34746- 0 0 0 0 0 0 0 0 0 0 0 0
34747- 0 0 0 0 0 0 0 0 0 0 0 0
34748- 0 0 0 0 0 0 0 0 0 0 0 0
34749- 0 0 0 0 0 0 0 0 0 0 0 0
34750- 0 0 0 0 0 0 0 0 0 0 0 0
34751- 0 0 0 0 0 0 0 0 0 6 6 6
34752- 14 14 14 38 38 38 74 74 74 66 66 66
34753- 2 2 6 6 6 6 90 90 90 250 250 250
34754-253 253 253 253 253 253 238 238 238 198 198 198
34755-190 190 190 190 190 190 195 195 195 221 221 221
34756-246 246 246 253 253 253 253 253 253 253 253 253
34757-253 253 253 253 253 253 253 253 253 253 253 253
34758-253 253 253 82 82 82 2 2 6 2 2 6
34759- 2 2 6 2 2 6 2 2 6 2 2 6
34760- 2 2 6 78 78 78 70 70 70 34 34 34
34761- 14 14 14 6 6 6 0 0 0 0 0 0
34762- 0 0 0 0 0 0 0 0 0 0 0 0
34763- 0 0 0 0 0 0 0 0 0 0 0 0
34764- 0 0 0 0 0 0 0 0 0 0 0 0
34765- 0 0 0 0 0 0 0 0 0 0 0 0
34766- 0 0 0 0 0 0 0 0 0 0 0 0
34767- 0 0 0 0 0 0 0 0 0 0 0 0
34768- 0 0 0 0 0 0 0 0 0 0 0 0
34769- 0 0 0 0 0 0 0 0 0 0 0 0
34770- 0 0 0 0 0 0 0 0 0 0 0 0
34771- 0 0 0 0 0 0 0 0 0 14 14 14
34772- 34 34 34 66 66 66 78 78 78 6 6 6
34773- 2 2 6 18 18 18 218 218 218 253 253 253
34774-253 253 253 253 253 253 253 253 253 246 246 246
34775-226 226 226 231 231 231 246 246 246 253 253 253
34776-253 253 253 253 253 253 253 253 253 253 253 253
34777-253 253 253 253 253 253 253 253 253 253 253 253
34778-253 253 253 178 178 178 2 2 6 2 2 6
34779- 2 2 6 2 2 6 2 2 6 2 2 6
34780- 2 2 6 18 18 18 90 90 90 62 62 62
34781- 30 30 30 10 10 10 0 0 0 0 0 0
34782- 0 0 0 0 0 0 0 0 0 0 0 0
34783- 0 0 0 0 0 0 0 0 0 0 0 0
34784- 0 0 0 0 0 0 0 0 0 0 0 0
34785- 0 0 0 0 0 0 0 0 0 0 0 0
34786- 0 0 0 0 0 0 0 0 0 0 0 0
34787- 0 0 0 0 0 0 0 0 0 0 0 0
34788- 0 0 0 0 0 0 0 0 0 0 0 0
34789- 0 0 0 0 0 0 0 0 0 0 0 0
34790- 0 0 0 0 0 0 0 0 0 0 0 0
34791- 0 0 0 0 0 0 10 10 10 26 26 26
34792- 58 58 58 90 90 90 18 18 18 2 2 6
34793- 2 2 6 110 110 110 253 253 253 253 253 253
34794-253 253 253 253 253 253 253 253 253 253 253 253
34795-250 250 250 253 253 253 253 253 253 253 253 253
34796-253 253 253 253 253 253 253 253 253 253 253 253
34797-253 253 253 253 253 253 253 253 253 253 253 253
34798-253 253 253 231 231 231 18 18 18 2 2 6
34799- 2 2 6 2 2 6 2 2 6 2 2 6
34800- 2 2 6 2 2 6 18 18 18 94 94 94
34801- 54 54 54 26 26 26 10 10 10 0 0 0
34802- 0 0 0 0 0 0 0 0 0 0 0 0
34803- 0 0 0 0 0 0 0 0 0 0 0 0
34804- 0 0 0 0 0 0 0 0 0 0 0 0
34805- 0 0 0 0 0 0 0 0 0 0 0 0
34806- 0 0 0 0 0 0 0 0 0 0 0 0
34807- 0 0 0 0 0 0 0 0 0 0 0 0
34808- 0 0 0 0 0 0 0 0 0 0 0 0
34809- 0 0 0 0 0 0 0 0 0 0 0 0
34810- 0 0 0 0 0 0 0 0 0 0 0 0
34811- 0 0 0 6 6 6 22 22 22 50 50 50
34812- 90 90 90 26 26 26 2 2 6 2 2 6
34813- 14 14 14 195 195 195 250 250 250 253 253 253
34814-253 253 253 253 253 253 253 253 253 253 253 253
34815-253 253 253 253 253 253 253 253 253 253 253 253
34816-253 253 253 253 253 253 253 253 253 253 253 253
34817-253 253 253 253 253 253 253 253 253 253 253 253
34818-250 250 250 242 242 242 54 54 54 2 2 6
34819- 2 2 6 2 2 6 2 2 6 2 2 6
34820- 2 2 6 2 2 6 2 2 6 38 38 38
34821- 86 86 86 50 50 50 22 22 22 6 6 6
34822- 0 0 0 0 0 0 0 0 0 0 0 0
34823- 0 0 0 0 0 0 0 0 0 0 0 0
34824- 0 0 0 0 0 0 0 0 0 0 0 0
34825- 0 0 0 0 0 0 0 0 0 0 0 0
34826- 0 0 0 0 0 0 0 0 0 0 0 0
34827- 0 0 0 0 0 0 0 0 0 0 0 0
34828- 0 0 0 0 0 0 0 0 0 0 0 0
34829- 0 0 0 0 0 0 0 0 0 0 0 0
34830- 0 0 0 0 0 0 0 0 0 0 0 0
34831- 6 6 6 14 14 14 38 38 38 82 82 82
34832- 34 34 34 2 2 6 2 2 6 2 2 6
34833- 42 42 42 195 195 195 246 246 246 253 253 253
34834-253 253 253 253 253 253 253 253 253 250 250 250
34835-242 242 242 242 242 242 250 250 250 253 253 253
34836-253 253 253 253 253 253 253 253 253 253 253 253
34837-253 253 253 250 250 250 246 246 246 238 238 238
34838-226 226 226 231 231 231 101 101 101 6 6 6
34839- 2 2 6 2 2 6 2 2 6 2 2 6
34840- 2 2 6 2 2 6 2 2 6 2 2 6
34841- 38 38 38 82 82 82 42 42 42 14 14 14
34842- 6 6 6 0 0 0 0 0 0 0 0 0
34843- 0 0 0 0 0 0 0 0 0 0 0 0
34844- 0 0 0 0 0 0 0 0 0 0 0 0
34845- 0 0 0 0 0 0 0 0 0 0 0 0
34846- 0 0 0 0 0 0 0 0 0 0 0 0
34847- 0 0 0 0 0 0 0 0 0 0 0 0
34848- 0 0 0 0 0 0 0 0 0 0 0 0
34849- 0 0 0 0 0 0 0 0 0 0 0 0
34850- 0 0 0 0 0 0 0 0 0 0 0 0
34851- 10 10 10 26 26 26 62 62 62 66 66 66
34852- 2 2 6 2 2 6 2 2 6 6 6 6
34853- 70 70 70 170 170 170 206 206 206 234 234 234
34854-246 246 246 250 250 250 250 250 250 238 238 238
34855-226 226 226 231 231 231 238 238 238 250 250 250
34856-250 250 250 250 250 250 246 246 246 231 231 231
34857-214 214 214 206 206 206 202 202 202 202 202 202
34858-198 198 198 202 202 202 182 182 182 18 18 18
34859- 2 2 6 2 2 6 2 2 6 2 2 6
34860- 2 2 6 2 2 6 2 2 6 2 2 6
34861- 2 2 6 62 62 62 66 66 66 30 30 30
34862- 10 10 10 0 0 0 0 0 0 0 0 0
34863- 0 0 0 0 0 0 0 0 0 0 0 0
34864- 0 0 0 0 0 0 0 0 0 0 0 0
34865- 0 0 0 0 0 0 0 0 0 0 0 0
34866- 0 0 0 0 0 0 0 0 0 0 0 0
34867- 0 0 0 0 0 0 0 0 0 0 0 0
34868- 0 0 0 0 0 0 0 0 0 0 0 0
34869- 0 0 0 0 0 0 0 0 0 0 0 0
34870- 0 0 0 0 0 0 0 0 0 0 0 0
34871- 14 14 14 42 42 42 82 82 82 18 18 18
34872- 2 2 6 2 2 6 2 2 6 10 10 10
34873- 94 94 94 182 182 182 218 218 218 242 242 242
34874-250 250 250 253 253 253 253 253 253 250 250 250
34875-234 234 234 253 253 253 253 253 253 253 253 253
34876-253 253 253 253 253 253 253 253 253 246 246 246
34877-238 238 238 226 226 226 210 210 210 202 202 202
34878-195 195 195 195 195 195 210 210 210 158 158 158
34879- 6 6 6 14 14 14 50 50 50 14 14 14
34880- 2 2 6 2 2 6 2 2 6 2 2 6
34881- 2 2 6 6 6 6 86 86 86 46 46 46
34882- 18 18 18 6 6 6 0 0 0 0 0 0
34883- 0 0 0 0 0 0 0 0 0 0 0 0
34884- 0 0 0 0 0 0 0 0 0 0 0 0
34885- 0 0 0 0 0 0 0 0 0 0 0 0
34886- 0 0 0 0 0 0 0 0 0 0 0 0
34887- 0 0 0 0 0 0 0 0 0 0 0 0
34888- 0 0 0 0 0 0 0 0 0 0 0 0
34889- 0 0 0 0 0 0 0 0 0 0 0 0
34890- 0 0 0 0 0 0 0 0 0 6 6 6
34891- 22 22 22 54 54 54 70 70 70 2 2 6
34892- 2 2 6 10 10 10 2 2 6 22 22 22
34893-166 166 166 231 231 231 250 250 250 253 253 253
34894-253 253 253 253 253 253 253 253 253 250 250 250
34895-242 242 242 253 253 253 253 253 253 253 253 253
34896-253 253 253 253 253 253 253 253 253 253 253 253
34897-253 253 253 253 253 253 253 253 253 246 246 246
34898-231 231 231 206 206 206 198 198 198 226 226 226
34899- 94 94 94 2 2 6 6 6 6 38 38 38
34900- 30 30 30 2 2 6 2 2 6 2 2 6
34901- 2 2 6 2 2 6 62 62 62 66 66 66
34902- 26 26 26 10 10 10 0 0 0 0 0 0
34903- 0 0 0 0 0 0 0 0 0 0 0 0
34904- 0 0 0 0 0 0 0 0 0 0 0 0
34905- 0 0 0 0 0 0 0 0 0 0 0 0
34906- 0 0 0 0 0 0 0 0 0 0 0 0
34907- 0 0 0 0 0 0 0 0 0 0 0 0
34908- 0 0 0 0 0 0 0 0 0 0 0 0
34909- 0 0 0 0 0 0 0 0 0 0 0 0
34910- 0 0 0 0 0 0 0 0 0 10 10 10
34911- 30 30 30 74 74 74 50 50 50 2 2 6
34912- 26 26 26 26 26 26 2 2 6 106 106 106
34913-238 238 238 253 253 253 253 253 253 253 253 253
34914-253 253 253 253 253 253 253 253 253 253 253 253
34915-253 253 253 253 253 253 253 253 253 253 253 253
34916-253 253 253 253 253 253 253 253 253 253 253 253
34917-253 253 253 253 253 253 253 253 253 253 253 253
34918-253 253 253 246 246 246 218 218 218 202 202 202
34919-210 210 210 14 14 14 2 2 6 2 2 6
34920- 30 30 30 22 22 22 2 2 6 2 2 6
34921- 2 2 6 2 2 6 18 18 18 86 86 86
34922- 42 42 42 14 14 14 0 0 0 0 0 0
34923- 0 0 0 0 0 0 0 0 0 0 0 0
34924- 0 0 0 0 0 0 0 0 0 0 0 0
34925- 0 0 0 0 0 0 0 0 0 0 0 0
34926- 0 0 0 0 0 0 0 0 0 0 0 0
34927- 0 0 0 0 0 0 0 0 0 0 0 0
34928- 0 0 0 0 0 0 0 0 0 0 0 0
34929- 0 0 0 0 0 0 0 0 0 0 0 0
34930- 0 0 0 0 0 0 0 0 0 14 14 14
34931- 42 42 42 90 90 90 22 22 22 2 2 6
34932- 42 42 42 2 2 6 18 18 18 218 218 218
34933-253 253 253 253 253 253 253 253 253 253 253 253
34934-253 253 253 253 253 253 253 253 253 253 253 253
34935-253 253 253 253 253 253 253 253 253 253 253 253
34936-253 253 253 253 253 253 253 253 253 253 253 253
34937-253 253 253 253 253 253 253 253 253 253 253 253
34938-253 253 253 253 253 253 250 250 250 221 221 221
34939-218 218 218 101 101 101 2 2 6 14 14 14
34940- 18 18 18 38 38 38 10 10 10 2 2 6
34941- 2 2 6 2 2 6 2 2 6 78 78 78
34942- 58 58 58 22 22 22 6 6 6 0 0 0
34943- 0 0 0 0 0 0 0 0 0 0 0 0
34944- 0 0 0 0 0 0 0 0 0 0 0 0
34945- 0 0 0 0 0 0 0 0 0 0 0 0
34946- 0 0 0 0 0 0 0 0 0 0 0 0
34947- 0 0 0 0 0 0 0 0 0 0 0 0
34948- 0 0 0 0 0 0 0 0 0 0 0 0
34949- 0 0 0 0 0 0 0 0 0 0 0 0
34950- 0 0 0 0 0 0 6 6 6 18 18 18
34951- 54 54 54 82 82 82 2 2 6 26 26 26
34952- 22 22 22 2 2 6 123 123 123 253 253 253
34953-253 253 253 253 253 253 253 253 253 253 253 253
34954-253 253 253 253 253 253 253 253 253 253 253 253
34955-253 253 253 253 253 253 253 253 253 253 253 253
34956-253 253 253 253 253 253 253 253 253 253 253 253
34957-253 253 253 253 253 253 253 253 253 253 253 253
34958-253 253 253 253 253 253 253 253 253 250 250 250
34959-238 238 238 198 198 198 6 6 6 38 38 38
34960- 58 58 58 26 26 26 38 38 38 2 2 6
34961- 2 2 6 2 2 6 2 2 6 46 46 46
34962- 78 78 78 30 30 30 10 10 10 0 0 0
34963- 0 0 0 0 0 0 0 0 0 0 0 0
34964- 0 0 0 0 0 0 0 0 0 0 0 0
34965- 0 0 0 0 0 0 0 0 0 0 0 0
34966- 0 0 0 0 0 0 0 0 0 0 0 0
34967- 0 0 0 0 0 0 0 0 0 0 0 0
34968- 0 0 0 0 0 0 0 0 0 0 0 0
34969- 0 0 0 0 0 0 0 0 0 0 0 0
34970- 0 0 0 0 0 0 10 10 10 30 30 30
34971- 74 74 74 58 58 58 2 2 6 42 42 42
34972- 2 2 6 22 22 22 231 231 231 253 253 253
34973-253 253 253 253 253 253 253 253 253 253 253 253
34974-253 253 253 253 253 253 253 253 253 250 250 250
34975-253 253 253 253 253 253 253 253 253 253 253 253
34976-253 253 253 253 253 253 253 253 253 253 253 253
34977-253 253 253 253 253 253 253 253 253 253 253 253
34978-253 253 253 253 253 253 253 253 253 253 253 253
34979-253 253 253 246 246 246 46 46 46 38 38 38
34980- 42 42 42 14 14 14 38 38 38 14 14 14
34981- 2 2 6 2 2 6 2 2 6 6 6 6
34982- 86 86 86 46 46 46 14 14 14 0 0 0
34983- 0 0 0 0 0 0 0 0 0 0 0 0
34984- 0 0 0 0 0 0 0 0 0 0 0 0
34985- 0 0 0 0 0 0 0 0 0 0 0 0
34986- 0 0 0 0 0 0 0 0 0 0 0 0
34987- 0 0 0 0 0 0 0 0 0 0 0 0
34988- 0 0 0 0 0 0 0 0 0 0 0 0
34989- 0 0 0 0 0 0 0 0 0 0 0 0
34990- 0 0 0 6 6 6 14 14 14 42 42 42
34991- 90 90 90 18 18 18 18 18 18 26 26 26
34992- 2 2 6 116 116 116 253 253 253 253 253 253
34993-253 253 253 253 253 253 253 253 253 253 253 253
34994-253 253 253 253 253 253 250 250 250 238 238 238
34995-253 253 253 253 253 253 253 253 253 253 253 253
34996-253 253 253 253 253 253 253 253 253 253 253 253
34997-253 253 253 253 253 253 253 253 253 253 253 253
34998-253 253 253 253 253 253 253 253 253 253 253 253
34999-253 253 253 253 253 253 94 94 94 6 6 6
35000- 2 2 6 2 2 6 10 10 10 34 34 34
35001- 2 2 6 2 2 6 2 2 6 2 2 6
35002- 74 74 74 58 58 58 22 22 22 6 6 6
35003- 0 0 0 0 0 0 0 0 0 0 0 0
35004- 0 0 0 0 0 0 0 0 0 0 0 0
35005- 0 0 0 0 0 0 0 0 0 0 0 0
35006- 0 0 0 0 0 0 0 0 0 0 0 0
35007- 0 0 0 0 0 0 0 0 0 0 0 0
35008- 0 0 0 0 0 0 0 0 0 0 0 0
35009- 0 0 0 0 0 0 0 0 0 0 0 0
35010- 0 0 0 10 10 10 26 26 26 66 66 66
35011- 82 82 82 2 2 6 38 38 38 6 6 6
35012- 14 14 14 210 210 210 253 253 253 253 253 253
35013-253 253 253 253 253 253 253 253 253 253 253 253
35014-253 253 253 253 253 253 246 246 246 242 242 242
35015-253 253 253 253 253 253 253 253 253 253 253 253
35016-253 253 253 253 253 253 253 253 253 253 253 253
35017-253 253 253 253 253 253 253 253 253 253 253 253
35018-253 253 253 253 253 253 253 253 253 253 253 253
35019-253 253 253 253 253 253 144 144 144 2 2 6
35020- 2 2 6 2 2 6 2 2 6 46 46 46
35021- 2 2 6 2 2 6 2 2 6 2 2 6
35022- 42 42 42 74 74 74 30 30 30 10 10 10
35023- 0 0 0 0 0 0 0 0 0 0 0 0
35024- 0 0 0 0 0 0 0 0 0 0 0 0
35025- 0 0 0 0 0 0 0 0 0 0 0 0
35026- 0 0 0 0 0 0 0 0 0 0 0 0
35027- 0 0 0 0 0 0 0 0 0 0 0 0
35028- 0 0 0 0 0 0 0 0 0 0 0 0
35029- 0 0 0 0 0 0 0 0 0 0 0 0
35030- 6 6 6 14 14 14 42 42 42 90 90 90
35031- 26 26 26 6 6 6 42 42 42 2 2 6
35032- 74 74 74 250 250 250 253 253 253 253 253 253
35033-253 253 253 253 253 253 253 253 253 253 253 253
35034-253 253 253 253 253 253 242 242 242 242 242 242
35035-253 253 253 253 253 253 253 253 253 253 253 253
35036-253 253 253 253 253 253 253 253 253 253 253 253
35037-253 253 253 253 253 253 253 253 253 253 253 253
35038-253 253 253 253 253 253 253 253 253 253 253 253
35039-253 253 253 253 253 253 182 182 182 2 2 6
35040- 2 2 6 2 2 6 2 2 6 46 46 46
35041- 2 2 6 2 2 6 2 2 6 2 2 6
35042- 10 10 10 86 86 86 38 38 38 10 10 10
35043- 0 0 0 0 0 0 0 0 0 0 0 0
35044- 0 0 0 0 0 0 0 0 0 0 0 0
35045- 0 0 0 0 0 0 0 0 0 0 0 0
35046- 0 0 0 0 0 0 0 0 0 0 0 0
35047- 0 0 0 0 0 0 0 0 0 0 0 0
35048- 0 0 0 0 0 0 0 0 0 0 0 0
35049- 0 0 0 0 0 0 0 0 0 0 0 0
35050- 10 10 10 26 26 26 66 66 66 82 82 82
35051- 2 2 6 22 22 22 18 18 18 2 2 6
35052-149 149 149 253 253 253 253 253 253 253 253 253
35053-253 253 253 253 253 253 253 253 253 253 253 253
35054-253 253 253 253 253 253 234 234 234 242 242 242
35055-253 253 253 253 253 253 253 253 253 253 253 253
35056-253 253 253 253 253 253 253 253 253 253 253 253
35057-253 253 253 253 253 253 253 253 253 253 253 253
35058-253 253 253 253 253 253 253 253 253 253 253 253
35059-253 253 253 253 253 253 206 206 206 2 2 6
35060- 2 2 6 2 2 6 2 2 6 38 38 38
35061- 2 2 6 2 2 6 2 2 6 2 2 6
35062- 6 6 6 86 86 86 46 46 46 14 14 14
35063- 0 0 0 0 0 0 0 0 0 0 0 0
35064- 0 0 0 0 0 0 0 0 0 0 0 0
35065- 0 0 0 0 0 0 0 0 0 0 0 0
35066- 0 0 0 0 0 0 0 0 0 0 0 0
35067- 0 0 0 0 0 0 0 0 0 0 0 0
35068- 0 0 0 0 0 0 0 0 0 0 0 0
35069- 0 0 0 0 0 0 0 0 0 6 6 6
35070- 18 18 18 46 46 46 86 86 86 18 18 18
35071- 2 2 6 34 34 34 10 10 10 6 6 6
35072-210 210 210 253 253 253 253 253 253 253 253 253
35073-253 253 253 253 253 253 253 253 253 253 253 253
35074-253 253 253 253 253 253 234 234 234 242 242 242
35075-253 253 253 253 253 253 253 253 253 253 253 253
35076-253 253 253 253 253 253 253 253 253 253 253 253
35077-253 253 253 253 253 253 253 253 253 253 253 253
35078-253 253 253 253 253 253 253 253 253 253 253 253
35079-253 253 253 253 253 253 221 221 221 6 6 6
35080- 2 2 6 2 2 6 6 6 6 30 30 30
35081- 2 2 6 2 2 6 2 2 6 2 2 6
35082- 2 2 6 82 82 82 54 54 54 18 18 18
35083- 6 6 6 0 0 0 0 0 0 0 0 0
35084- 0 0 0 0 0 0 0 0 0 0 0 0
35085- 0 0 0 0 0 0 0 0 0 0 0 0
35086- 0 0 0 0 0 0 0 0 0 0 0 0
35087- 0 0 0 0 0 0 0 0 0 0 0 0
35088- 0 0 0 0 0 0 0 0 0 0 0 0
35089- 0 0 0 0 0 0 0 0 0 10 10 10
35090- 26 26 26 66 66 66 62 62 62 2 2 6
35091- 2 2 6 38 38 38 10 10 10 26 26 26
35092-238 238 238 253 253 253 253 253 253 253 253 253
35093-253 253 253 253 253 253 253 253 253 253 253 253
35094-253 253 253 253 253 253 231 231 231 238 238 238
35095-253 253 253 253 253 253 253 253 253 253 253 253
35096-253 253 253 253 253 253 253 253 253 253 253 253
35097-253 253 253 253 253 253 253 253 253 253 253 253
35098-253 253 253 253 253 253 253 253 253 253 253 253
35099-253 253 253 253 253 253 231 231 231 6 6 6
35100- 2 2 6 2 2 6 10 10 10 30 30 30
35101- 2 2 6 2 2 6 2 2 6 2 2 6
35102- 2 2 6 66 66 66 58 58 58 22 22 22
35103- 6 6 6 0 0 0 0 0 0 0 0 0
35104- 0 0 0 0 0 0 0 0 0 0 0 0
35105- 0 0 0 0 0 0 0 0 0 0 0 0
35106- 0 0 0 0 0 0 0 0 0 0 0 0
35107- 0 0 0 0 0 0 0 0 0 0 0 0
35108- 0 0 0 0 0 0 0 0 0 0 0 0
35109- 0 0 0 0 0 0 0 0 0 10 10 10
35110- 38 38 38 78 78 78 6 6 6 2 2 6
35111- 2 2 6 46 46 46 14 14 14 42 42 42
35112-246 246 246 253 253 253 253 253 253 253 253 253
35113-253 253 253 253 253 253 253 253 253 253 253 253
35114-253 253 253 253 253 253 231 231 231 242 242 242
35115-253 253 253 253 253 253 253 253 253 253 253 253
35116-253 253 253 253 253 253 253 253 253 253 253 253
35117-253 253 253 253 253 253 253 253 253 253 253 253
35118-253 253 253 253 253 253 253 253 253 253 253 253
35119-253 253 253 253 253 253 234 234 234 10 10 10
35120- 2 2 6 2 2 6 22 22 22 14 14 14
35121- 2 2 6 2 2 6 2 2 6 2 2 6
35122- 2 2 6 66 66 66 62 62 62 22 22 22
35123- 6 6 6 0 0 0 0 0 0 0 0 0
35124- 0 0 0 0 0 0 0 0 0 0 0 0
35125- 0 0 0 0 0 0 0 0 0 0 0 0
35126- 0 0 0 0 0 0 0 0 0 0 0 0
35127- 0 0 0 0 0 0 0 0 0 0 0 0
35128- 0 0 0 0 0 0 0 0 0 0 0 0
35129- 0 0 0 0 0 0 6 6 6 18 18 18
35130- 50 50 50 74 74 74 2 2 6 2 2 6
35131- 14 14 14 70 70 70 34 34 34 62 62 62
35132-250 250 250 253 253 253 253 253 253 253 253 253
35133-253 253 253 253 253 253 253 253 253 253 253 253
35134-253 253 253 253 253 253 231 231 231 246 246 246
35135-253 253 253 253 253 253 253 253 253 253 253 253
35136-253 253 253 253 253 253 253 253 253 253 253 253
35137-253 253 253 253 253 253 253 253 253 253 253 253
35138-253 253 253 253 253 253 253 253 253 253 253 253
35139-253 253 253 253 253 253 234 234 234 14 14 14
35140- 2 2 6 2 2 6 30 30 30 2 2 6
35141- 2 2 6 2 2 6 2 2 6 2 2 6
35142- 2 2 6 66 66 66 62 62 62 22 22 22
35143- 6 6 6 0 0 0 0 0 0 0 0 0
35144- 0 0 0 0 0 0 0 0 0 0 0 0
35145- 0 0 0 0 0 0 0 0 0 0 0 0
35146- 0 0 0 0 0 0 0 0 0 0 0 0
35147- 0 0 0 0 0 0 0 0 0 0 0 0
35148- 0 0 0 0 0 0 0 0 0 0 0 0
35149- 0 0 0 0 0 0 6 6 6 18 18 18
35150- 54 54 54 62 62 62 2 2 6 2 2 6
35151- 2 2 6 30 30 30 46 46 46 70 70 70
35152-250 250 250 253 253 253 253 253 253 253 253 253
35153-253 253 253 253 253 253 253 253 253 253 253 253
35154-253 253 253 253 253 253 231 231 231 246 246 246
35155-253 253 253 253 253 253 253 253 253 253 253 253
35156-253 253 253 253 253 253 253 253 253 253 253 253
35157-253 253 253 253 253 253 253 253 253 253 253 253
35158-253 253 253 253 253 253 253 253 253 253 253 253
35159-253 253 253 253 253 253 226 226 226 10 10 10
35160- 2 2 6 6 6 6 30 30 30 2 2 6
35161- 2 2 6 2 2 6 2 2 6 2 2 6
35162- 2 2 6 66 66 66 58 58 58 22 22 22
35163- 6 6 6 0 0 0 0 0 0 0 0 0
35164- 0 0 0 0 0 0 0 0 0 0 0 0
35165- 0 0 0 0 0 0 0 0 0 0 0 0
35166- 0 0 0 0 0 0 0 0 0 0 0 0
35167- 0 0 0 0 0 0 0 0 0 0 0 0
35168- 0 0 0 0 0 0 0 0 0 0 0 0
35169- 0 0 0 0 0 0 6 6 6 22 22 22
35170- 58 58 58 62 62 62 2 2 6 2 2 6
35171- 2 2 6 2 2 6 30 30 30 78 78 78
35172-250 250 250 253 253 253 253 253 253 253 253 253
35173-253 253 253 253 253 253 253 253 253 253 253 253
35174-253 253 253 253 253 253 231 231 231 246 246 246
35175-253 253 253 253 253 253 253 253 253 253 253 253
35176-253 253 253 253 253 253 253 253 253 253 253 253
35177-253 253 253 253 253 253 253 253 253 253 253 253
35178-253 253 253 253 253 253 253 253 253 253 253 253
35179-253 253 253 253 253 253 206 206 206 2 2 6
35180- 22 22 22 34 34 34 18 14 6 22 22 22
35181- 26 26 26 18 18 18 6 6 6 2 2 6
35182- 2 2 6 82 82 82 54 54 54 18 18 18
35183- 6 6 6 0 0 0 0 0 0 0 0 0
35184- 0 0 0 0 0 0 0 0 0 0 0 0
35185- 0 0 0 0 0 0 0 0 0 0 0 0
35186- 0 0 0 0 0 0 0 0 0 0 0 0
35187- 0 0 0 0 0 0 0 0 0 0 0 0
35188- 0 0 0 0 0 0 0 0 0 0 0 0
35189- 0 0 0 0 0 0 6 6 6 26 26 26
35190- 62 62 62 106 106 106 74 54 14 185 133 11
35191-210 162 10 121 92 8 6 6 6 62 62 62
35192-238 238 238 253 253 253 253 253 253 253 253 253
35193-253 253 253 253 253 253 253 253 253 253 253 253
35194-253 253 253 253 253 253 231 231 231 246 246 246
35195-253 253 253 253 253 253 253 253 253 253 253 253
35196-253 253 253 253 253 253 253 253 253 253 253 253
35197-253 253 253 253 253 253 253 253 253 253 253 253
35198-253 253 253 253 253 253 253 253 253 253 253 253
35199-253 253 253 253 253 253 158 158 158 18 18 18
35200- 14 14 14 2 2 6 2 2 6 2 2 6
35201- 6 6 6 18 18 18 66 66 66 38 38 38
35202- 6 6 6 94 94 94 50 50 50 18 18 18
35203- 6 6 6 0 0 0 0 0 0 0 0 0
35204- 0 0 0 0 0 0 0 0 0 0 0 0
35205- 0 0 0 0 0 0 0 0 0 0 0 0
35206- 0 0 0 0 0 0 0 0 0 0 0 0
35207- 0 0 0 0 0 0 0 0 0 0 0 0
35208- 0 0 0 0 0 0 0 0 0 6 6 6
35209- 10 10 10 10 10 10 18 18 18 38 38 38
35210- 78 78 78 142 134 106 216 158 10 242 186 14
35211-246 190 14 246 190 14 156 118 10 10 10 10
35212- 90 90 90 238 238 238 253 253 253 253 253 253
35213-253 253 253 253 253 253 253 253 253 253 253 253
35214-253 253 253 253 253 253 231 231 231 250 250 250
35215-253 253 253 253 253 253 253 253 253 253 253 253
35216-253 253 253 253 253 253 253 253 253 253 253 253
35217-253 253 253 253 253 253 253 253 253 253 253 253
35218-253 253 253 253 253 253 253 253 253 246 230 190
35219-238 204 91 238 204 91 181 142 44 37 26 9
35220- 2 2 6 2 2 6 2 2 6 2 2 6
35221- 2 2 6 2 2 6 38 38 38 46 46 46
35222- 26 26 26 106 106 106 54 54 54 18 18 18
35223- 6 6 6 0 0 0 0 0 0 0 0 0
35224- 0 0 0 0 0 0 0 0 0 0 0 0
35225- 0 0 0 0 0 0 0 0 0 0 0 0
35226- 0 0 0 0 0 0 0 0 0 0 0 0
35227- 0 0 0 0 0 0 0 0 0 0 0 0
35228- 0 0 0 6 6 6 14 14 14 22 22 22
35229- 30 30 30 38 38 38 50 50 50 70 70 70
35230-106 106 106 190 142 34 226 170 11 242 186 14
35231-246 190 14 246 190 14 246 190 14 154 114 10
35232- 6 6 6 74 74 74 226 226 226 253 253 253
35233-253 253 253 253 253 253 253 253 253 253 253 253
35234-253 253 253 253 253 253 231 231 231 250 250 250
35235-253 253 253 253 253 253 253 253 253 253 253 253
35236-253 253 253 253 253 253 253 253 253 253 253 253
35237-253 253 253 253 253 253 253 253 253 253 253 253
35238-253 253 253 253 253 253 253 253 253 228 184 62
35239-241 196 14 241 208 19 232 195 16 38 30 10
35240- 2 2 6 2 2 6 2 2 6 2 2 6
35241- 2 2 6 6 6 6 30 30 30 26 26 26
35242-203 166 17 154 142 90 66 66 66 26 26 26
35243- 6 6 6 0 0 0 0 0 0 0 0 0
35244- 0 0 0 0 0 0 0 0 0 0 0 0
35245- 0 0 0 0 0 0 0 0 0 0 0 0
35246- 0 0 0 0 0 0 0 0 0 0 0 0
35247- 0 0 0 0 0 0 0 0 0 0 0 0
35248- 6 6 6 18 18 18 38 38 38 58 58 58
35249- 78 78 78 86 86 86 101 101 101 123 123 123
35250-175 146 61 210 150 10 234 174 13 246 186 14
35251-246 190 14 246 190 14 246 190 14 238 190 10
35252-102 78 10 2 2 6 46 46 46 198 198 198
35253-253 253 253 253 253 253 253 253 253 253 253 253
35254-253 253 253 253 253 253 234 234 234 242 242 242
35255-253 253 253 253 253 253 253 253 253 253 253 253
35256-253 253 253 253 253 253 253 253 253 253 253 253
35257-253 253 253 253 253 253 253 253 253 253 253 253
35258-253 253 253 253 253 253 253 253 253 224 178 62
35259-242 186 14 241 196 14 210 166 10 22 18 6
35260- 2 2 6 2 2 6 2 2 6 2 2 6
35261- 2 2 6 2 2 6 6 6 6 121 92 8
35262-238 202 15 232 195 16 82 82 82 34 34 34
35263- 10 10 10 0 0 0 0 0 0 0 0 0
35264- 0 0 0 0 0 0 0 0 0 0 0 0
35265- 0 0 0 0 0 0 0 0 0 0 0 0
35266- 0 0 0 0 0 0 0 0 0 0 0 0
35267- 0 0 0 0 0 0 0 0 0 0 0 0
35268- 14 14 14 38 38 38 70 70 70 154 122 46
35269-190 142 34 200 144 11 197 138 11 197 138 11
35270-213 154 11 226 170 11 242 186 14 246 190 14
35271-246 190 14 246 190 14 246 190 14 246 190 14
35272-225 175 15 46 32 6 2 2 6 22 22 22
35273-158 158 158 250 250 250 253 253 253 253 253 253
35274-253 253 253 253 253 253 253 253 253 253 253 253
35275-253 253 253 253 253 253 253 253 253 253 253 253
35276-253 253 253 253 253 253 253 253 253 253 253 253
35277-253 253 253 253 253 253 253 253 253 253 253 253
35278-253 253 253 250 250 250 242 242 242 224 178 62
35279-239 182 13 236 186 11 213 154 11 46 32 6
35280- 2 2 6 2 2 6 2 2 6 2 2 6
35281- 2 2 6 2 2 6 61 42 6 225 175 15
35282-238 190 10 236 186 11 112 100 78 42 42 42
35283- 14 14 14 0 0 0 0 0 0 0 0 0
35284- 0 0 0 0 0 0 0 0 0 0 0 0
35285- 0 0 0 0 0 0 0 0 0 0 0 0
35286- 0 0 0 0 0 0 0 0 0 0 0 0
35287- 0 0 0 0 0 0 0 0 0 6 6 6
35288- 22 22 22 54 54 54 154 122 46 213 154 11
35289-226 170 11 230 174 11 226 170 11 226 170 11
35290-236 178 12 242 186 14 246 190 14 246 190 14
35291-246 190 14 246 190 14 246 190 14 246 190 14
35292-241 196 14 184 144 12 10 10 10 2 2 6
35293- 6 6 6 116 116 116 242 242 242 253 253 253
35294-253 253 253 253 253 253 253 253 253 253 253 253
35295-253 253 253 253 253 253 253 253 253 253 253 253
35296-253 253 253 253 253 253 253 253 253 253 253 253
35297-253 253 253 253 253 253 253 253 253 253 253 253
35298-253 253 253 231 231 231 198 198 198 214 170 54
35299-236 178 12 236 178 12 210 150 10 137 92 6
35300- 18 14 6 2 2 6 2 2 6 2 2 6
35301- 6 6 6 70 47 6 200 144 11 236 178 12
35302-239 182 13 239 182 13 124 112 88 58 58 58
35303- 22 22 22 6 6 6 0 0 0 0 0 0
35304- 0 0 0 0 0 0 0 0 0 0 0 0
35305- 0 0 0 0 0 0 0 0 0 0 0 0
35306- 0 0 0 0 0 0 0 0 0 0 0 0
35307- 0 0 0 0 0 0 0 0 0 10 10 10
35308- 30 30 30 70 70 70 180 133 36 226 170 11
35309-239 182 13 242 186 14 242 186 14 246 186 14
35310-246 190 14 246 190 14 246 190 14 246 190 14
35311-246 190 14 246 190 14 246 190 14 246 190 14
35312-246 190 14 232 195 16 98 70 6 2 2 6
35313- 2 2 6 2 2 6 66 66 66 221 221 221
35314-253 253 253 253 253 253 253 253 253 253 253 253
35315-253 253 253 253 253 253 253 253 253 253 253 253
35316-253 253 253 253 253 253 253 253 253 253 253 253
35317-253 253 253 253 253 253 253 253 253 253 253 253
35318-253 253 253 206 206 206 198 198 198 214 166 58
35319-230 174 11 230 174 11 216 158 10 192 133 9
35320-163 110 8 116 81 8 102 78 10 116 81 8
35321-167 114 7 197 138 11 226 170 11 239 182 13
35322-242 186 14 242 186 14 162 146 94 78 78 78
35323- 34 34 34 14 14 14 6 6 6 0 0 0
35324- 0 0 0 0 0 0 0 0 0 0 0 0
35325- 0 0 0 0 0 0 0 0 0 0 0 0
35326- 0 0 0 0 0 0 0 0 0 0 0 0
35327- 0 0 0 0 0 0 0 0 0 6 6 6
35328- 30 30 30 78 78 78 190 142 34 226 170 11
35329-239 182 13 246 190 14 246 190 14 246 190 14
35330-246 190 14 246 190 14 246 190 14 246 190 14
35331-246 190 14 246 190 14 246 190 14 246 190 14
35332-246 190 14 241 196 14 203 166 17 22 18 6
35333- 2 2 6 2 2 6 2 2 6 38 38 38
35334-218 218 218 253 253 253 253 253 253 253 253 253
35335-253 253 253 253 253 253 253 253 253 253 253 253
35336-253 253 253 253 253 253 253 253 253 253 253 253
35337-253 253 253 253 253 253 253 253 253 253 253 253
35338-250 250 250 206 206 206 198 198 198 202 162 69
35339-226 170 11 236 178 12 224 166 10 210 150 10
35340-200 144 11 197 138 11 192 133 9 197 138 11
35341-210 150 10 226 170 11 242 186 14 246 190 14
35342-246 190 14 246 186 14 225 175 15 124 112 88
35343- 62 62 62 30 30 30 14 14 14 6 6 6
35344- 0 0 0 0 0 0 0 0 0 0 0 0
35345- 0 0 0 0 0 0 0 0 0 0 0 0
35346- 0 0 0 0 0 0 0 0 0 0 0 0
35347- 0 0 0 0 0 0 0 0 0 10 10 10
35348- 30 30 30 78 78 78 174 135 50 224 166 10
35349-239 182 13 246 190 14 246 190 14 246 190 14
35350-246 190 14 246 190 14 246 190 14 246 190 14
35351-246 190 14 246 190 14 246 190 14 246 190 14
35352-246 190 14 246 190 14 241 196 14 139 102 15
35353- 2 2 6 2 2 6 2 2 6 2 2 6
35354- 78 78 78 250 250 250 253 253 253 253 253 253
35355-253 253 253 253 253 253 253 253 253 253 253 253
35356-253 253 253 253 253 253 253 253 253 253 253 253
35357-253 253 253 253 253 253 253 253 253 253 253 253
35358-250 250 250 214 214 214 198 198 198 190 150 46
35359-219 162 10 236 178 12 234 174 13 224 166 10
35360-216 158 10 213 154 11 213 154 11 216 158 10
35361-226 170 11 239 182 13 246 190 14 246 190 14
35362-246 190 14 246 190 14 242 186 14 206 162 42
35363-101 101 101 58 58 58 30 30 30 14 14 14
35364- 6 6 6 0 0 0 0 0 0 0 0 0
35365- 0 0 0 0 0 0 0 0 0 0 0 0
35366- 0 0 0 0 0 0 0 0 0 0 0 0
35367- 0 0 0 0 0 0 0 0 0 10 10 10
35368- 30 30 30 74 74 74 174 135 50 216 158 10
35369-236 178 12 246 190 14 246 190 14 246 190 14
35370-246 190 14 246 190 14 246 190 14 246 190 14
35371-246 190 14 246 190 14 246 190 14 246 190 14
35372-246 190 14 246 190 14 241 196 14 226 184 13
35373- 61 42 6 2 2 6 2 2 6 2 2 6
35374- 22 22 22 238 238 238 253 253 253 253 253 253
35375-253 253 253 253 253 253 253 253 253 253 253 253
35376-253 253 253 253 253 253 253 253 253 253 253 253
35377-253 253 253 253 253 253 253 253 253 253 253 253
35378-253 253 253 226 226 226 187 187 187 180 133 36
35379-216 158 10 236 178 12 239 182 13 236 178 12
35380-230 174 11 226 170 11 226 170 11 230 174 11
35381-236 178 12 242 186 14 246 190 14 246 190 14
35382-246 190 14 246 190 14 246 186 14 239 182 13
35383-206 162 42 106 106 106 66 66 66 34 34 34
35384- 14 14 14 6 6 6 0 0 0 0 0 0
35385- 0 0 0 0 0 0 0 0 0 0 0 0
35386- 0 0 0 0 0 0 0 0 0 0 0 0
35387- 0 0 0 0 0 0 0 0 0 6 6 6
35388- 26 26 26 70 70 70 163 133 67 213 154 11
35389-236 178 12 246 190 14 246 190 14 246 190 14
35390-246 190 14 246 190 14 246 190 14 246 190 14
35391-246 190 14 246 190 14 246 190 14 246 190 14
35392-246 190 14 246 190 14 246 190 14 241 196 14
35393-190 146 13 18 14 6 2 2 6 2 2 6
35394- 46 46 46 246 246 246 253 253 253 253 253 253
35395-253 253 253 253 253 253 253 253 253 253 253 253
35396-253 253 253 253 253 253 253 253 253 253 253 253
35397-253 253 253 253 253 253 253 253 253 253 253 253
35398-253 253 253 221 221 221 86 86 86 156 107 11
35399-216 158 10 236 178 12 242 186 14 246 186 14
35400-242 186 14 239 182 13 239 182 13 242 186 14
35401-242 186 14 246 186 14 246 190 14 246 190 14
35402-246 190 14 246 190 14 246 190 14 246 190 14
35403-242 186 14 225 175 15 142 122 72 66 66 66
35404- 30 30 30 10 10 10 0 0 0 0 0 0
35405- 0 0 0 0 0 0 0 0 0 0 0 0
35406- 0 0 0 0 0 0 0 0 0 0 0 0
35407- 0 0 0 0 0 0 0 0 0 6 6 6
35408- 26 26 26 70 70 70 163 133 67 210 150 10
35409-236 178 12 246 190 14 246 190 14 246 190 14
35410-246 190 14 246 190 14 246 190 14 246 190 14
35411-246 190 14 246 190 14 246 190 14 246 190 14
35412-246 190 14 246 190 14 246 190 14 246 190 14
35413-232 195 16 121 92 8 34 34 34 106 106 106
35414-221 221 221 253 253 253 253 253 253 253 253 253
35415-253 253 253 253 253 253 253 253 253 253 253 253
35416-253 253 253 253 253 253 253 253 253 253 253 253
35417-253 253 253 253 253 253 253 253 253 253 253 253
35418-242 242 242 82 82 82 18 14 6 163 110 8
35419-216 158 10 236 178 12 242 186 14 246 190 14
35420-246 190 14 246 190 14 246 190 14 246 190 14
35421-246 190 14 246 190 14 246 190 14 246 190 14
35422-246 190 14 246 190 14 246 190 14 246 190 14
35423-246 190 14 246 190 14 242 186 14 163 133 67
35424- 46 46 46 18 18 18 6 6 6 0 0 0
35425- 0 0 0 0 0 0 0 0 0 0 0 0
35426- 0 0 0 0 0 0 0 0 0 0 0 0
35427- 0 0 0 0 0 0 0 0 0 10 10 10
35428- 30 30 30 78 78 78 163 133 67 210 150 10
35429-236 178 12 246 186 14 246 190 14 246 190 14
35430-246 190 14 246 190 14 246 190 14 246 190 14
35431-246 190 14 246 190 14 246 190 14 246 190 14
35432-246 190 14 246 190 14 246 190 14 246 190 14
35433-241 196 14 215 174 15 190 178 144 253 253 253
35434-253 253 253 253 253 253 253 253 253 253 253 253
35435-253 253 253 253 253 253 253 253 253 253 253 253
35436-253 253 253 253 253 253 253 253 253 253 253 253
35437-253 253 253 253 253 253 253 253 253 218 218 218
35438- 58 58 58 2 2 6 22 18 6 167 114 7
35439-216 158 10 236 178 12 246 186 14 246 190 14
35440-246 190 14 246 190 14 246 190 14 246 190 14
35441-246 190 14 246 190 14 246 190 14 246 190 14
35442-246 190 14 246 190 14 246 190 14 246 190 14
35443-246 190 14 246 186 14 242 186 14 190 150 46
35444- 54 54 54 22 22 22 6 6 6 0 0 0
35445- 0 0 0 0 0 0 0 0 0 0 0 0
35446- 0 0 0 0 0 0 0 0 0 0 0 0
35447- 0 0 0 0 0 0 0 0 0 14 14 14
35448- 38 38 38 86 86 86 180 133 36 213 154 11
35449-236 178 12 246 186 14 246 190 14 246 190 14
35450-246 190 14 246 190 14 246 190 14 246 190 14
35451-246 190 14 246 190 14 246 190 14 246 190 14
35452-246 190 14 246 190 14 246 190 14 246 190 14
35453-246 190 14 232 195 16 190 146 13 214 214 214
35454-253 253 253 253 253 253 253 253 253 253 253 253
35455-253 253 253 253 253 253 253 253 253 253 253 253
35456-253 253 253 253 253 253 253 253 253 253 253 253
35457-253 253 253 250 250 250 170 170 170 26 26 26
35458- 2 2 6 2 2 6 37 26 9 163 110 8
35459-219 162 10 239 182 13 246 186 14 246 190 14
35460-246 190 14 246 190 14 246 190 14 246 190 14
35461-246 190 14 246 190 14 246 190 14 246 190 14
35462-246 190 14 246 190 14 246 190 14 246 190 14
35463-246 186 14 236 178 12 224 166 10 142 122 72
35464- 46 46 46 18 18 18 6 6 6 0 0 0
35465- 0 0 0 0 0 0 0 0 0 0 0 0
35466- 0 0 0 0 0 0 0 0 0 0 0 0
35467- 0 0 0 0 0 0 6 6 6 18 18 18
35468- 50 50 50 109 106 95 192 133 9 224 166 10
35469-242 186 14 246 190 14 246 190 14 246 190 14
35470-246 190 14 246 190 14 246 190 14 246 190 14
35471-246 190 14 246 190 14 246 190 14 246 190 14
35472-246 190 14 246 190 14 246 190 14 246 190 14
35473-242 186 14 226 184 13 210 162 10 142 110 46
35474-226 226 226 253 253 253 253 253 253 253 253 253
35475-253 253 253 253 253 253 253 253 253 253 253 253
35476-253 253 253 253 253 253 253 253 253 253 253 253
35477-198 198 198 66 66 66 2 2 6 2 2 6
35478- 2 2 6 2 2 6 50 34 6 156 107 11
35479-219 162 10 239 182 13 246 186 14 246 190 14
35480-246 190 14 246 190 14 246 190 14 246 190 14
35481-246 190 14 246 190 14 246 190 14 246 190 14
35482-246 190 14 246 190 14 246 190 14 242 186 14
35483-234 174 13 213 154 11 154 122 46 66 66 66
35484- 30 30 30 10 10 10 0 0 0 0 0 0
35485- 0 0 0 0 0 0 0 0 0 0 0 0
35486- 0 0 0 0 0 0 0 0 0 0 0 0
35487- 0 0 0 0 0 0 6 6 6 22 22 22
35488- 58 58 58 154 121 60 206 145 10 234 174 13
35489-242 186 14 246 186 14 246 190 14 246 190 14
35490-246 190 14 246 190 14 246 190 14 246 190 14
35491-246 190 14 246 190 14 246 190 14 246 190 14
35492-246 190 14 246 190 14 246 190 14 246 190 14
35493-246 186 14 236 178 12 210 162 10 163 110 8
35494- 61 42 6 138 138 138 218 218 218 250 250 250
35495-253 253 253 253 253 253 253 253 253 250 250 250
35496-242 242 242 210 210 210 144 144 144 66 66 66
35497- 6 6 6 2 2 6 2 2 6 2 2 6
35498- 2 2 6 2 2 6 61 42 6 163 110 8
35499-216 158 10 236 178 12 246 190 14 246 190 14
35500-246 190 14 246 190 14 246 190 14 246 190 14
35501-246 190 14 246 190 14 246 190 14 246 190 14
35502-246 190 14 239 182 13 230 174 11 216 158 10
35503-190 142 34 124 112 88 70 70 70 38 38 38
35504- 18 18 18 6 6 6 0 0 0 0 0 0
35505- 0 0 0 0 0 0 0 0 0 0 0 0
35506- 0 0 0 0 0 0 0 0 0 0 0 0
35507- 0 0 0 0 0 0 6 6 6 22 22 22
35508- 62 62 62 168 124 44 206 145 10 224 166 10
35509-236 178 12 239 182 13 242 186 14 242 186 14
35510-246 186 14 246 190 14 246 190 14 246 190 14
35511-246 190 14 246 190 14 246 190 14 246 190 14
35512-246 190 14 246 190 14 246 190 14 246 190 14
35513-246 190 14 236 178 12 216 158 10 175 118 6
35514- 80 54 7 2 2 6 6 6 6 30 30 30
35515- 54 54 54 62 62 62 50 50 50 38 38 38
35516- 14 14 14 2 2 6 2 2 6 2 2 6
35517- 2 2 6 2 2 6 2 2 6 2 2 6
35518- 2 2 6 6 6 6 80 54 7 167 114 7
35519-213 154 11 236 178 12 246 190 14 246 190 14
35520-246 190 14 246 190 14 246 190 14 246 190 14
35521-246 190 14 242 186 14 239 182 13 239 182 13
35522-230 174 11 210 150 10 174 135 50 124 112 88
35523- 82 82 82 54 54 54 34 34 34 18 18 18
35524- 6 6 6 0 0 0 0 0 0 0 0 0
35525- 0 0 0 0 0 0 0 0 0 0 0 0
35526- 0 0 0 0 0 0 0 0 0 0 0 0
35527- 0 0 0 0 0 0 6 6 6 18 18 18
35528- 50 50 50 158 118 36 192 133 9 200 144 11
35529-216 158 10 219 162 10 224 166 10 226 170 11
35530-230 174 11 236 178 12 239 182 13 239 182 13
35531-242 186 14 246 186 14 246 190 14 246 190 14
35532-246 190 14 246 190 14 246 190 14 246 190 14
35533-246 186 14 230 174 11 210 150 10 163 110 8
35534-104 69 6 10 10 10 2 2 6 2 2 6
35535- 2 2 6 2 2 6 2 2 6 2 2 6
35536- 2 2 6 2 2 6 2 2 6 2 2 6
35537- 2 2 6 2 2 6 2 2 6 2 2 6
35538- 2 2 6 6 6 6 91 60 6 167 114 7
35539-206 145 10 230 174 11 242 186 14 246 190 14
35540-246 190 14 246 190 14 246 186 14 242 186 14
35541-239 182 13 230 174 11 224 166 10 213 154 11
35542-180 133 36 124 112 88 86 86 86 58 58 58
35543- 38 38 38 22 22 22 10 10 10 6 6 6
35544- 0 0 0 0 0 0 0 0 0 0 0 0
35545- 0 0 0 0 0 0 0 0 0 0 0 0
35546- 0 0 0 0 0 0 0 0 0 0 0 0
35547- 0 0 0 0 0 0 0 0 0 14 14 14
35548- 34 34 34 70 70 70 138 110 50 158 118 36
35549-167 114 7 180 123 7 192 133 9 197 138 11
35550-200 144 11 206 145 10 213 154 11 219 162 10
35551-224 166 10 230 174 11 239 182 13 242 186 14
35552-246 186 14 246 186 14 246 186 14 246 186 14
35553-239 182 13 216 158 10 185 133 11 152 99 6
35554-104 69 6 18 14 6 2 2 6 2 2 6
35555- 2 2 6 2 2 6 2 2 6 2 2 6
35556- 2 2 6 2 2 6 2 2 6 2 2 6
35557- 2 2 6 2 2 6 2 2 6 2 2 6
35558- 2 2 6 6 6 6 80 54 7 152 99 6
35559-192 133 9 219 162 10 236 178 12 239 182 13
35560-246 186 14 242 186 14 239 182 13 236 178 12
35561-224 166 10 206 145 10 192 133 9 154 121 60
35562- 94 94 94 62 62 62 42 42 42 22 22 22
35563- 14 14 14 6 6 6 0 0 0 0 0 0
35564- 0 0 0 0 0 0 0 0 0 0 0 0
35565- 0 0 0 0 0 0 0 0 0 0 0 0
35566- 0 0 0 0 0 0 0 0 0 0 0 0
35567- 0 0 0 0 0 0 0 0 0 6 6 6
35568- 18 18 18 34 34 34 58 58 58 78 78 78
35569-101 98 89 124 112 88 142 110 46 156 107 11
35570-163 110 8 167 114 7 175 118 6 180 123 7
35571-185 133 11 197 138 11 210 150 10 219 162 10
35572-226 170 11 236 178 12 236 178 12 234 174 13
35573-219 162 10 197 138 11 163 110 8 130 83 6
35574- 91 60 6 10 10 10 2 2 6 2 2 6
35575- 18 18 18 38 38 38 38 38 38 38 38 38
35576- 38 38 38 38 38 38 38 38 38 38 38 38
35577- 38 38 38 38 38 38 26 26 26 2 2 6
35578- 2 2 6 6 6 6 70 47 6 137 92 6
35579-175 118 6 200 144 11 219 162 10 230 174 11
35580-234 174 13 230 174 11 219 162 10 210 150 10
35581-192 133 9 163 110 8 124 112 88 82 82 82
35582- 50 50 50 30 30 30 14 14 14 6 6 6
35583- 0 0 0 0 0 0 0 0 0 0 0 0
35584- 0 0 0 0 0 0 0 0 0 0 0 0
35585- 0 0 0 0 0 0 0 0 0 0 0 0
35586- 0 0 0 0 0 0 0 0 0 0 0 0
35587- 0 0 0 0 0 0 0 0 0 0 0 0
35588- 6 6 6 14 14 14 22 22 22 34 34 34
35589- 42 42 42 58 58 58 74 74 74 86 86 86
35590-101 98 89 122 102 70 130 98 46 121 87 25
35591-137 92 6 152 99 6 163 110 8 180 123 7
35592-185 133 11 197 138 11 206 145 10 200 144 11
35593-180 123 7 156 107 11 130 83 6 104 69 6
35594- 50 34 6 54 54 54 110 110 110 101 98 89
35595- 86 86 86 82 82 82 78 78 78 78 78 78
35596- 78 78 78 78 78 78 78 78 78 78 78 78
35597- 78 78 78 82 82 82 86 86 86 94 94 94
35598-106 106 106 101 101 101 86 66 34 124 80 6
35599-156 107 11 180 123 7 192 133 9 200 144 11
35600-206 145 10 200 144 11 192 133 9 175 118 6
35601-139 102 15 109 106 95 70 70 70 42 42 42
35602- 22 22 22 10 10 10 0 0 0 0 0 0
35603- 0 0 0 0 0 0 0 0 0 0 0 0
35604- 0 0 0 0 0 0 0 0 0 0 0 0
35605- 0 0 0 0 0 0 0 0 0 0 0 0
35606- 0 0 0 0 0 0 0 0 0 0 0 0
35607- 0 0 0 0 0 0 0 0 0 0 0 0
35608- 0 0 0 0 0 0 6 6 6 10 10 10
35609- 14 14 14 22 22 22 30 30 30 38 38 38
35610- 50 50 50 62 62 62 74 74 74 90 90 90
35611-101 98 89 112 100 78 121 87 25 124 80 6
35612-137 92 6 152 99 6 152 99 6 152 99 6
35613-138 86 6 124 80 6 98 70 6 86 66 30
35614-101 98 89 82 82 82 58 58 58 46 46 46
35615- 38 38 38 34 34 34 34 34 34 34 34 34
35616- 34 34 34 34 34 34 34 34 34 34 34 34
35617- 34 34 34 34 34 34 38 38 38 42 42 42
35618- 54 54 54 82 82 82 94 86 76 91 60 6
35619-134 86 6 156 107 11 167 114 7 175 118 6
35620-175 118 6 167 114 7 152 99 6 121 87 25
35621-101 98 89 62 62 62 34 34 34 18 18 18
35622- 6 6 6 0 0 0 0 0 0 0 0 0
35623- 0 0 0 0 0 0 0 0 0 0 0 0
35624- 0 0 0 0 0 0 0 0 0 0 0 0
35625- 0 0 0 0 0 0 0 0 0 0 0 0
35626- 0 0 0 0 0 0 0 0 0 0 0 0
35627- 0 0 0 0 0 0 0 0 0 0 0 0
35628- 0 0 0 0 0 0 0 0 0 0 0 0
35629- 0 0 0 6 6 6 6 6 6 10 10 10
35630- 18 18 18 22 22 22 30 30 30 42 42 42
35631- 50 50 50 66 66 66 86 86 86 101 98 89
35632-106 86 58 98 70 6 104 69 6 104 69 6
35633-104 69 6 91 60 6 82 62 34 90 90 90
35634- 62 62 62 38 38 38 22 22 22 14 14 14
35635- 10 10 10 10 10 10 10 10 10 10 10 10
35636- 10 10 10 10 10 10 6 6 6 10 10 10
35637- 10 10 10 10 10 10 10 10 10 14 14 14
35638- 22 22 22 42 42 42 70 70 70 89 81 66
35639- 80 54 7 104 69 6 124 80 6 137 92 6
35640-134 86 6 116 81 8 100 82 52 86 86 86
35641- 58 58 58 30 30 30 14 14 14 6 6 6
35642- 0 0 0 0 0 0 0 0 0 0 0 0
35643- 0 0 0 0 0 0 0 0 0 0 0 0
35644- 0 0 0 0 0 0 0 0 0 0 0 0
35645- 0 0 0 0 0 0 0 0 0 0 0 0
35646- 0 0 0 0 0 0 0 0 0 0 0 0
35647- 0 0 0 0 0 0 0 0 0 0 0 0
35648- 0 0 0 0 0 0 0 0 0 0 0 0
35649- 0 0 0 0 0 0 0 0 0 0 0 0
35650- 0 0 0 6 6 6 10 10 10 14 14 14
35651- 18 18 18 26 26 26 38 38 38 54 54 54
35652- 70 70 70 86 86 86 94 86 76 89 81 66
35653- 89 81 66 86 86 86 74 74 74 50 50 50
35654- 30 30 30 14 14 14 6 6 6 0 0 0
35655- 0 0 0 0 0 0 0 0 0 0 0 0
35656- 0 0 0 0 0 0 0 0 0 0 0 0
35657- 0 0 0 0 0 0 0 0 0 0 0 0
35658- 6 6 6 18 18 18 34 34 34 58 58 58
35659- 82 82 82 89 81 66 89 81 66 89 81 66
35660- 94 86 66 94 86 76 74 74 74 50 50 50
35661- 26 26 26 14 14 14 6 6 6 0 0 0
35662- 0 0 0 0 0 0 0 0 0 0 0 0
35663- 0 0 0 0 0 0 0 0 0 0 0 0
35664- 0 0 0 0 0 0 0 0 0 0 0 0
35665- 0 0 0 0 0 0 0 0 0 0 0 0
35666- 0 0 0 0 0 0 0 0 0 0 0 0
35667- 0 0 0 0 0 0 0 0 0 0 0 0
35668- 0 0 0 0 0 0 0 0 0 0 0 0
35669- 0 0 0 0 0 0 0 0 0 0 0 0
35670- 0 0 0 0 0 0 0 0 0 0 0 0
35671- 6 6 6 6 6 6 14 14 14 18 18 18
35672- 30 30 30 38 38 38 46 46 46 54 54 54
35673- 50 50 50 42 42 42 30 30 30 18 18 18
35674- 10 10 10 0 0 0 0 0 0 0 0 0
35675- 0 0 0 0 0 0 0 0 0 0 0 0
35676- 0 0 0 0 0 0 0 0 0 0 0 0
35677- 0 0 0 0 0 0 0 0 0 0 0 0
35678- 0 0 0 6 6 6 14 14 14 26 26 26
35679- 38 38 38 50 50 50 58 58 58 58 58 58
35680- 54 54 54 42 42 42 30 30 30 18 18 18
35681- 10 10 10 0 0 0 0 0 0 0 0 0
35682- 0 0 0 0 0 0 0 0 0 0 0 0
35683- 0 0 0 0 0 0 0 0 0 0 0 0
35684- 0 0 0 0 0 0 0 0 0 0 0 0
35685- 0 0 0 0 0 0 0 0 0 0 0 0
35686- 0 0 0 0 0 0 0 0 0 0 0 0
35687- 0 0 0 0 0 0 0 0 0 0 0 0
35688- 0 0 0 0 0 0 0 0 0 0 0 0
35689- 0 0 0 0 0 0 0 0 0 0 0 0
35690- 0 0 0 0 0 0 0 0 0 0 0 0
35691- 0 0 0 0 0 0 0 0 0 6 6 6
35692- 6 6 6 10 10 10 14 14 14 18 18 18
35693- 18 18 18 14 14 14 10 10 10 6 6 6
35694- 0 0 0 0 0 0 0 0 0 0 0 0
35695- 0 0 0 0 0 0 0 0 0 0 0 0
35696- 0 0 0 0 0 0 0 0 0 0 0 0
35697- 0 0 0 0 0 0 0 0 0 0 0 0
35698- 0 0 0 0 0 0 0 0 0 6 6 6
35699- 14 14 14 18 18 18 22 22 22 22 22 22
35700- 18 18 18 14 14 14 10 10 10 6 6 6
35701- 0 0 0 0 0 0 0 0 0 0 0 0
35702- 0 0 0 0 0 0 0 0 0 0 0 0
35703- 0 0 0 0 0 0 0 0 0 0 0 0
35704- 0 0 0 0 0 0 0 0 0 0 0 0
35705- 0 0 0 0 0 0 0 0 0 0 0 0
35706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35719+4 4 4 4 4 4
35720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35733+4 4 4 4 4 4
35734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35747+4 4 4 4 4 4
35748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35761+4 4 4 4 4 4
35762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35775+4 4 4 4 4 4
35776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35789+4 4 4 4 4 4
35790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35794+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
35795+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
35796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35799+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
35800+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35801+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
35802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35803+4 4 4 4 4 4
35804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35808+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
35809+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
35810+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35813+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
35814+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
35815+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
35816+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35817+4 4 4 4 4 4
35818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35822+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
35823+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
35824+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35827+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
35828+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
35829+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
35830+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
35831+4 4 4 4 4 4
35832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35835+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
35836+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
35837+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
35838+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
35839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35840+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35841+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
35842+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
35843+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
35844+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
35845+4 4 4 4 4 4
35846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35849+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
35850+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
35851+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
35852+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
35853+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35854+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
35855+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
35856+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
35857+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
35858+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
35859+4 4 4 4 4 4
35860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
35863+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
35864+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
35865+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
35866+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
35867+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
35868+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
35869+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
35870+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
35871+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
35872+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
35873+4 4 4 4 4 4
35874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35876+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
35877+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
35878+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
35879+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
35880+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
35881+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
35882+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
35883+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
35884+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
35885+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
35886+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
35887+4 4 4 4 4 4
35888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35890+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
35891+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
35892+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
35893+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
35894+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
35895+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
35896+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
35897+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
35898+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
35899+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
35900+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
35901+4 4 4 4 4 4
35902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35904+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
35905+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
35906+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
35907+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
35908+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
35909+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
35910+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
35911+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
35912+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
35913+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
35914+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35915+4 4 4 4 4 4
35916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35918+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
35919+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
35920+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
35921+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
35922+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
35923+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
35924+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
35925+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
35926+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
35927+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
35928+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
35929+4 4 4 4 4 4
35930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35931+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
35932+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
35933+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
35934+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
35935+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
35936+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
35937+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
35938+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
35939+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
35940+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
35941+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
35942+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
35943+4 4 4 4 4 4
35944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35945+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
35946+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
35947+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
35948+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
35949+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
35950+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
35951+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
35952+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
35953+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
35954+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
35955+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
35956+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
35957+0 0 0 4 4 4
35958+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35959+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
35960+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
35961+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
35962+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
35963+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
35964+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
35965+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
35966+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
35967+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
35968+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
35969+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
35970+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
35971+2 0 0 0 0 0
35972+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
35973+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
35974+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
35975+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
35976+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
35977+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
35978+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
35979+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
35980+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
35981+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
35982+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
35983+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
35984+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
35985+37 38 37 0 0 0
35986+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35987+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
35988+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
35989+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
35990+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
35991+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
35992+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
35993+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
35994+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
35995+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
35996+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
35997+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
35998+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
35999+85 115 134 4 0 0
36000+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
36001+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
36002+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
36003+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
36004+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
36005+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
36006+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
36007+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
36008+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
36009+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
36010+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
36011+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
36012+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
36013+60 73 81 4 0 0
36014+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
36015+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
36016+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
36017+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
36018+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
36019+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
36020+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
36021+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
36022+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
36023+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
36024+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
36025+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
36026+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
36027+16 19 21 4 0 0
36028+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
36029+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
36030+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
36031+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
36032+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
36033+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
36034+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
36035+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
36036+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
36037+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
36038+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
36039+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
36040+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
36041+4 0 0 4 3 3
36042+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
36043+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
36044+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
36045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
36046+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
36047+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
36048+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
36049+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
36050+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
36051+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
36052+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
36053+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
36054+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
36055+3 2 2 4 4 4
36056+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
36057+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
36058+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
36059+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
36060+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
36061+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
36062+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
36063+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
36064+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
36065+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
36066+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
36067+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
36068+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
36069+4 4 4 4 4 4
36070+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
36071+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
36072+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
36073+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
36074+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
36075+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
36076+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
36077+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
36078+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
36079+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
36080+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
36081+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
36082+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
36083+4 4 4 4 4 4
36084+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
36085+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
36086+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
36087+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
36088+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
36089+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36090+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
36091+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
36092+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
36093+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
36094+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
36095+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
36096+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
36097+5 5 5 5 5 5
36098+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
36099+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
36100+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
36101+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
36102+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
36103+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36104+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
36105+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
36106+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
36107+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
36108+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
36109+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
36110+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36111+5 5 5 4 4 4
36112+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
36113+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
36114+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
36115+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
36116+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36117+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
36118+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
36119+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
36120+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
36121+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
36122+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
36123+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36125+4 4 4 4 4 4
36126+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
36127+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
36128+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
36129+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
36130+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
36131+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36132+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36133+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
36134+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
36135+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
36136+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
36137+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
36138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36139+4 4 4 4 4 4
36140+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
36141+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
36142+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
36143+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
36144+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36145+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
36146+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
36147+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
36148+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
36149+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
36150+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
36151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36153+4 4 4 4 4 4
36154+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
36155+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
36156+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
36157+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
36158+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36159+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36160+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36161+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
36162+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
36163+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
36164+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
36165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36167+4 4 4 4 4 4
36168+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
36169+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
36170+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
36171+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
36172+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36173+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
36174+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36175+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
36176+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
36177+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
36178+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36181+4 4 4 4 4 4
36182+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
36183+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
36184+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
36185+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
36186+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36187+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
36188+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
36189+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
36190+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
36191+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
36192+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
36193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36195+4 4 4 4 4 4
36196+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
36197+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
36198+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
36199+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
36200+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36201+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
36202+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
36203+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
36204+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
36205+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
36206+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
36207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36209+4 4 4 4 4 4
36210+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
36211+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
36212+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
36213+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
36214+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
36215+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
36216+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
36217+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
36218+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
36219+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
36220+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36223+4 4 4 4 4 4
36224+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
36225+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
36226+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
36227+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
36228+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36229+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
36230+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
36231+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
36232+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
36233+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
36234+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36237+4 4 4 4 4 4
36238+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
36239+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
36240+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
36241+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
36242+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36243+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
36244+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
36245+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
36246+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
36247+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
36248+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36251+4 4 4 4 4 4
36252+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
36253+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
36254+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
36255+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
36256+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36257+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
36258+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
36259+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
36260+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
36261+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36262+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36265+4 4 4 4 4 4
36266+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
36267+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
36268+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
36269+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
36270+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
36271+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
36272+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
36273+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
36274+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36275+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36276+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36279+4 4 4 4 4 4
36280+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
36281+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
36282+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
36283+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
36284+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36285+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
36286+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
36287+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
36288+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36289+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36290+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36293+4 4 4 4 4 4
36294+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
36295+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
36296+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
36297+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
36298+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
36299+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
36300+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
36301+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
36302+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36303+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36304+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36307+4 4 4 4 4 4
36308+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
36309+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
36310+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36311+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
36312+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
36313+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
36314+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
36315+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
36316+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
36317+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36318+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36321+4 4 4 4 4 4
36322+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
36323+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
36324+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
36325+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
36326+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
36327+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
36328+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
36329+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
36330+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36331+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36332+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36335+4 4 4 4 4 4
36336+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
36337+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
36338+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36339+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
36340+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
36341+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
36342+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
36343+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
36344+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
36345+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36346+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36349+4 4 4 4 4 4
36350+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
36351+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
36352+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
36353+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
36354+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
36355+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
36356+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
36357+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
36358+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36359+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36360+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36363+4 4 4 4 4 4
36364+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36365+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
36366+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36367+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
36368+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
36369+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
36370+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
36371+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
36372+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36373+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36374+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36377+4 4 4 4 4 4
36378+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
36379+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
36380+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
36381+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
36382+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
36383+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
36384+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36385+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
36386+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36387+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36388+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36391+4 4 4 4 4 4
36392+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36393+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
36394+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
36395+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
36396+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
36397+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
36398+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36399+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
36400+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36401+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36402+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36405+4 4 4 4 4 4
36406+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
36407+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
36408+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
36409+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
36410+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
36411+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
36412+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
36413+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
36414+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
36415+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36416+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36419+4 4 4 4 4 4
36420+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36421+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
36422+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
36423+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
36424+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
36425+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
36426+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
36427+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
36428+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
36429+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36430+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36433+4 4 4 4 4 4
36434+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
36435+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
36436+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
36437+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
36438+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
36439+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
36440+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
36441+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
36442+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
36443+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36444+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36447+4 4 4 4 4 4
36448+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36449+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
36450+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
36451+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
36452+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
36453+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
36454+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
36455+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
36456+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
36457+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36458+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36461+4 4 4 4 4 4
36462+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
36463+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
36464+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
36465+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
36466+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
36467+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
36468+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
36469+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
36470+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
36471+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
36472+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36475+4 4 4 4 4 4
36476+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
36477+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
36478+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
36479+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
36480+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
36481+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
36482+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
36483+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
36484+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
36485+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
36486+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36489+4 4 4 4 4 4
36490+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
36491+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
36492+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
36493+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
36494+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
36495+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
36496+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36497+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
36498+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
36499+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
36500+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36503+4 4 4 4 4 4
36504+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
36505+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
36506+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
36507+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
36508+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
36509+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
36510+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
36511+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
36512+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
36513+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
36514+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36517+4 4 4 4 4 4
36518+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
36519+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
36520+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36521+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
36522+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
36523+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
36524+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
36525+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
36526+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
36527+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
36528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36531+4 4 4 4 4 4
36532+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36533+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
36534+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
36535+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
36536+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
36537+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
36538+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
36539+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
36540+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
36541+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36545+4 4 4 4 4 4
36546+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
36547+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
36548+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
36549+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
36550+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
36551+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
36552+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
36553+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
36554+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
36555+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36559+4 4 4 4 4 4
36560+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
36561+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
36562+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
36563+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
36564+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
36565+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
36566+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
36567+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
36568+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36569+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36573+4 4 4 4 4 4
36574+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
36575+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36576+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
36577+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36578+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
36579+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
36580+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
36581+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
36582+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
36583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36587+4 4 4 4 4 4
36588+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
36589+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
36590+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
36591+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
36592+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
36593+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
36594+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
36595+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
36596+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
36597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36601+4 4 4 4 4 4
36602+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36603+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
36604+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
36605+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
36606+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
36607+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
36608+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
36609+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
36610+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36615+4 4 4 4 4 4
36616+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
36617+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
36618+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36619+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
36620+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
36621+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
36622+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
36623+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
36624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36629+4 4 4 4 4 4
36630+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36631+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
36632+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
36633+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
36634+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
36635+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
36636+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
36637+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36643+4 4 4 4 4 4
36644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36645+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
36646+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36647+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
36648+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
36649+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
36650+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
36651+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
36652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36657+4 4 4 4 4 4
36658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36659+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
36660+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
36661+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
36662+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
36663+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
36664+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
36665+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
36666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36671+4 4 4 4 4 4
36672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36673+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36674+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
36675+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36676+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
36677+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
36678+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
36679+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36685+4 4 4 4 4 4
36686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36688+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36689+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
36690+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
36691+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
36692+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
36693+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36699+4 4 4 4 4 4
36700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36703+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36704+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
36705+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
36706+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
36707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36713+4 4 4 4 4 4
36714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36717+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36718+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36719+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
36720+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
36721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36727+4 4 4 4 4 4
36728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36731+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36732+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36733+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36734+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
36735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36741+4 4 4 4 4 4
36742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36745+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
36746+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
36747+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
36748+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
36749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36755+4 4 4 4 4 4
36756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36760+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
36761+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36762+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36769+4 4 4 4 4 4
36770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36774+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
36775+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
36776+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36783+4 4 4 4 4 4
36784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36788+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
36789+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
36790+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36797+4 4 4 4 4 4
36798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36802+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
36803+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
36804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36811+4 4 4 4 4 4
36812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36816+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36817+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
36818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36825+4 4 4 4 4 4
36826diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
36827--- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
36828+++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
36829@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
36830 dlfb_urb_completion(urb);
36831
36832 error:
36833- atomic_add(bytes_sent, &dev->bytes_sent);
36834- atomic_add(bytes_identical, &dev->bytes_identical);
36835- atomic_add(width*height*2, &dev->bytes_rendered);
36836+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36837+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36838+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
36839 end_cycles = get_cycles();
36840- atomic_add(((unsigned int) ((end_cycles - start_cycles)
36841+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36842 >> 10)), /* Kcycles */
36843 &dev->cpu_kcycles_used);
36844
36845@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
36846 dlfb_urb_completion(urb);
36847
36848 error:
36849- atomic_add(bytes_sent, &dev->bytes_sent);
36850- atomic_add(bytes_identical, &dev->bytes_identical);
36851- atomic_add(bytes_rendered, &dev->bytes_rendered);
36852+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36853+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36854+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
36855 end_cycles = get_cycles();
36856- atomic_add(((unsigned int) ((end_cycles - start_cycles)
36857+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36858 >> 10)), /* Kcycles */
36859 &dev->cpu_kcycles_used);
36860 }
36861@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
36862 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36863 struct dlfb_data *dev = fb_info->par;
36864 return snprintf(buf, PAGE_SIZE, "%u\n",
36865- atomic_read(&dev->bytes_rendered));
36866+ atomic_read_unchecked(&dev->bytes_rendered));
36867 }
36868
36869 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
36870@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
36871 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36872 struct dlfb_data *dev = fb_info->par;
36873 return snprintf(buf, PAGE_SIZE, "%u\n",
36874- atomic_read(&dev->bytes_identical));
36875+ atomic_read_unchecked(&dev->bytes_identical));
36876 }
36877
36878 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
36879@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
36880 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36881 struct dlfb_data *dev = fb_info->par;
36882 return snprintf(buf, PAGE_SIZE, "%u\n",
36883- atomic_read(&dev->bytes_sent));
36884+ atomic_read_unchecked(&dev->bytes_sent));
36885 }
36886
36887 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
36888@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
36889 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36890 struct dlfb_data *dev = fb_info->par;
36891 return snprintf(buf, PAGE_SIZE, "%u\n",
36892- atomic_read(&dev->cpu_kcycles_used));
36893+ atomic_read_unchecked(&dev->cpu_kcycles_used));
36894 }
36895
36896 static ssize_t edid_show(
36897@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
36898 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36899 struct dlfb_data *dev = fb_info->par;
36900
36901- atomic_set(&dev->bytes_rendered, 0);
36902- atomic_set(&dev->bytes_identical, 0);
36903- atomic_set(&dev->bytes_sent, 0);
36904- atomic_set(&dev->cpu_kcycles_used, 0);
36905+ atomic_set_unchecked(&dev->bytes_rendered, 0);
36906+ atomic_set_unchecked(&dev->bytes_identical, 0);
36907+ atomic_set_unchecked(&dev->bytes_sent, 0);
36908+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
36909
36910 return count;
36911 }
36912diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
36913--- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
36914+++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
36915@@ -19,6 +19,7 @@
36916 #include <linux/io.h>
36917 #include <linux/mutex.h>
36918 #include <linux/slab.h>
36919+#include <linux/moduleloader.h>
36920 #include <video/edid.h>
36921 #include <video/uvesafb.h>
36922 #ifdef CONFIG_X86
36923@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
36924 NULL,
36925 };
36926
36927- return call_usermodehelper(v86d_path, argv, envp, 1);
36928+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36929 }
36930
36931 /*
36932@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
36933 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36934 par->pmi_setpal = par->ypan = 0;
36935 } else {
36936+
36937+#ifdef CONFIG_PAX_KERNEXEC
36938+#ifdef CONFIG_MODULES
36939+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36940+#endif
36941+ if (!par->pmi_code) {
36942+ par->pmi_setpal = par->ypan = 0;
36943+ return 0;
36944+ }
36945+#endif
36946+
36947 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36948 + task->t.regs.edi);
36949+
36950+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36951+ pax_open_kernel();
36952+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36953+ pax_close_kernel();
36954+
36955+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36956+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36957+#else
36958 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36959 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36960+#endif
36961+
36962 printk(KERN_INFO "uvesafb: protected mode interface info at "
36963 "%04x:%04x\n",
36964 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36965@@ -1821,6 +1844,11 @@ out:
36966 if (par->vbe_modes)
36967 kfree(par->vbe_modes);
36968
36969+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36970+ if (par->pmi_code)
36971+ module_free_exec(NULL, par->pmi_code);
36972+#endif
36973+
36974 framebuffer_release(info);
36975 return err;
36976 }
36977@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
36978 kfree(par->vbe_state_orig);
36979 if (par->vbe_state_saved)
36980 kfree(par->vbe_state_saved);
36981+
36982+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36983+ if (par->pmi_code)
36984+ module_free_exec(NULL, par->pmi_code);
36985+#endif
36986+
36987 }
36988
36989 framebuffer_release(info);
36990diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
36991--- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
36992+++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
36993@@ -9,6 +9,7 @@
36994 */
36995
36996 #include <linux/module.h>
36997+#include <linux/moduleloader.h>
36998 #include <linux/kernel.h>
36999 #include <linux/errno.h>
37000 #include <linux/string.h>
37001@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
37002 static int vram_total __initdata; /* Set total amount of memory */
37003 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
37004 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
37005-static void (*pmi_start)(void) __read_mostly;
37006-static void (*pmi_pal) (void) __read_mostly;
37007+static void (*pmi_start)(void) __read_only;
37008+static void (*pmi_pal) (void) __read_only;
37009 static int depth __read_mostly;
37010 static int vga_compat __read_mostly;
37011 /* --------------------------------------------------------------------- */
37012@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
37013 unsigned int size_vmode;
37014 unsigned int size_remap;
37015 unsigned int size_total;
37016+ void *pmi_code = NULL;
37017
37018 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
37019 return -ENODEV;
37020@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
37021 size_remap = size_total;
37022 vesafb_fix.smem_len = size_remap;
37023
37024-#ifndef __i386__
37025- screen_info.vesapm_seg = 0;
37026-#endif
37027-
37028 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
37029 printk(KERN_WARNING
37030 "vesafb: cannot reserve video memory at 0x%lx\n",
37031@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
37032 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
37033 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
37034
37035+#ifdef __i386__
37036+
37037+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37038+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
37039+ if (!pmi_code)
37040+#elif !defined(CONFIG_PAX_KERNEXEC)
37041+ if (0)
37042+#endif
37043+
37044+#endif
37045+ screen_info.vesapm_seg = 0;
37046+
37047 if (screen_info.vesapm_seg) {
37048- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
37049- screen_info.vesapm_seg,screen_info.vesapm_off);
37050+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
37051+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
37052 }
37053
37054 if (screen_info.vesapm_seg < 0xc000)
37055@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
37056
37057 if (ypan || pmi_setpal) {
37058 unsigned short *pmi_base;
37059+
37060 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
37061- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
37062- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
37063+
37064+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37065+ pax_open_kernel();
37066+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
37067+#else
37068+ pmi_code = pmi_base;
37069+#endif
37070+
37071+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
37072+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
37073+
37074+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37075+ pmi_start = ktva_ktla(pmi_start);
37076+ pmi_pal = ktva_ktla(pmi_pal);
37077+ pax_close_kernel();
37078+#endif
37079+
37080 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
37081 if (pmi_base[3]) {
37082 printk(KERN_INFO "vesafb: pmi: ports = ");
37083@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
37084 info->node, info->fix.id);
37085 return 0;
37086 err:
37087+
37088+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37089+ module_free_exec(NULL, pmi_code);
37090+#endif
37091+
37092 if (info->screen_base)
37093 iounmap(info->screen_base);
37094 framebuffer_release(info);
37095diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
37096--- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
37097+++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
37098@@ -56,7 +56,7 @@ struct via_clock {
37099
37100 void (*set_engine_pll_state)(u8 state);
37101 void (*set_engine_pll)(struct via_pll_config config);
37102-};
37103+} __no_const;
37104
37105
37106 static inline u32 get_pll_internal_frequency(u32 ref_freq,
37107diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
37108--- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
37109+++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
37110@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
37111 struct sysinfo i;
37112 int idx = 0;
37113
37114+ pax_track_stack();
37115+
37116 all_vm_events(events);
37117 si_meminfo(&i);
37118
37119diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
37120--- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
37121+++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
37122@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
37123 void
37124 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37125 {
37126- char *s = nd_get_link(nd);
37127+ const char *s = nd_get_link(nd);
37128
37129 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
37130 IS_ERR(s) ? "<error>" : s);
37131diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
37132--- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
37133+++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
37134@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
37135 size += sizeof(struct io_event) * nr_events;
37136 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
37137
37138- if (nr_pages < 0)
37139+ if (nr_pages <= 0)
37140 return -EINVAL;
37141
37142 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
37143@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
37144 struct aio_timeout to;
37145 int retry = 0;
37146
37147+ pax_track_stack();
37148+
37149 /* needed to zero any padding within an entry (there shouldn't be
37150 * any, but C is fun!
37151 */
37152@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
37153 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
37154 {
37155 ssize_t ret;
37156+ struct iovec iovstack;
37157
37158 #ifdef CONFIG_COMPAT
37159 if (compat)
37160 ret = compat_rw_copy_check_uvector(type,
37161 (struct compat_iovec __user *)kiocb->ki_buf,
37162- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37163+ kiocb->ki_nbytes, 1, &iovstack,
37164 &kiocb->ki_iovec);
37165 else
37166 #endif
37167 ret = rw_copy_check_uvector(type,
37168 (struct iovec __user *)kiocb->ki_buf,
37169- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37170+ kiocb->ki_nbytes, 1, &iovstack,
37171 &kiocb->ki_iovec);
37172 if (ret < 0)
37173 goto out;
37174
37175+ if (kiocb->ki_iovec == &iovstack) {
37176+ kiocb->ki_inline_vec = iovstack;
37177+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
37178+ }
37179 kiocb->ki_nr_segs = kiocb->ki_nbytes;
37180 kiocb->ki_cur_seg = 0;
37181 /* ki_nbytes/left now reflect bytes instead of segs */
37182diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
37183--- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
37184+++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
37185@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
37186 unsigned long limit;
37187
37188 limit = rlimit(RLIMIT_FSIZE);
37189+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
37190 if (limit != RLIM_INFINITY && offset > limit)
37191 goto out_sig;
37192 if (offset > inode->i_sb->s_maxbytes)
37193diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
37194--- linux-3.0.4/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
37195+++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
37196@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
37197 {
37198 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
37199 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
37200- char *link = nd_get_link(nd);
37201+ const char *link = nd_get_link(nd);
37202 if (!IS_ERR(link))
37203 kfree(link);
37204 }
37205diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
37206--- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
37207+++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
37208@@ -16,6 +16,7 @@
37209 #include <linux/string.h>
37210 #include <linux/fs.h>
37211 #include <linux/file.h>
37212+#include <linux/security.h>
37213 #include <linux/stat.h>
37214 #include <linux/fcntl.h>
37215 #include <linux/ptrace.h>
37216@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
37217 #endif
37218 # define START_STACK(u) ((void __user *)u.start_stack)
37219
37220+ memset(&dump, 0, sizeof(dump));
37221+
37222 fs = get_fs();
37223 set_fs(KERNEL_DS);
37224 has_dumped = 1;
37225@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
37226
37227 /* If the size of the dump file exceeds the rlimit, then see what would happen
37228 if we wrote the stack, but not the data area. */
37229+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
37230 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
37231 dump.u_dsize = 0;
37232
37233 /* Make sure we have enough room to write the stack and data areas. */
37234+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
37235 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
37236 dump.u_ssize = 0;
37237
37238@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
37239 rlim = rlimit(RLIMIT_DATA);
37240 if (rlim >= RLIM_INFINITY)
37241 rlim = ~0;
37242+
37243+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
37244 if (ex.a_data + ex.a_bss > rlim)
37245 return -ENOMEM;
37246
37247@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
37248 install_exec_creds(bprm);
37249 current->flags &= ~PF_FORKNOEXEC;
37250
37251+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37252+ current->mm->pax_flags = 0UL;
37253+#endif
37254+
37255+#ifdef CONFIG_PAX_PAGEEXEC
37256+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
37257+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
37258+
37259+#ifdef CONFIG_PAX_EMUTRAMP
37260+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
37261+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
37262+#endif
37263+
37264+#ifdef CONFIG_PAX_MPROTECT
37265+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
37266+ current->mm->pax_flags |= MF_PAX_MPROTECT;
37267+#endif
37268+
37269+ }
37270+#endif
37271+
37272 if (N_MAGIC(ex) == OMAGIC) {
37273 unsigned long text_addr, map_size;
37274 loff_t pos;
37275@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
37276
37277 down_write(&current->mm->mmap_sem);
37278 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
37279- PROT_READ | PROT_WRITE | PROT_EXEC,
37280+ PROT_READ | PROT_WRITE,
37281 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
37282 fd_offset + ex.a_text);
37283 up_write(&current->mm->mmap_sem);
37284diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
37285--- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
37286+++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
37287@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
37288 #define elf_core_dump NULL
37289 #endif
37290
37291+#ifdef CONFIG_PAX_MPROTECT
37292+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
37293+#endif
37294+
37295 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
37296 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
37297 #else
37298@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
37299 .load_binary = load_elf_binary,
37300 .load_shlib = load_elf_library,
37301 .core_dump = elf_core_dump,
37302+
37303+#ifdef CONFIG_PAX_MPROTECT
37304+ .handle_mprotect= elf_handle_mprotect,
37305+#endif
37306+
37307 .min_coredump = ELF_EXEC_PAGESIZE,
37308 };
37309
37310@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
37311
37312 static int set_brk(unsigned long start, unsigned long end)
37313 {
37314+ unsigned long e = end;
37315+
37316 start = ELF_PAGEALIGN(start);
37317 end = ELF_PAGEALIGN(end);
37318 if (end > start) {
37319@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
37320 if (BAD_ADDR(addr))
37321 return addr;
37322 }
37323- current->mm->start_brk = current->mm->brk = end;
37324+ current->mm->start_brk = current->mm->brk = e;
37325 return 0;
37326 }
37327
37328@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
37329 elf_addr_t __user *u_rand_bytes;
37330 const char *k_platform = ELF_PLATFORM;
37331 const char *k_base_platform = ELF_BASE_PLATFORM;
37332- unsigned char k_rand_bytes[16];
37333+ u32 k_rand_bytes[4];
37334 int items;
37335 elf_addr_t *elf_info;
37336 int ei_index = 0;
37337 const struct cred *cred = current_cred();
37338 struct vm_area_struct *vma;
37339+ unsigned long saved_auxv[AT_VECTOR_SIZE];
37340+
37341+ pax_track_stack();
37342
37343 /*
37344 * In some cases (e.g. Hyper-Threading), we want to avoid L1
37345@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
37346 * Generate 16 random bytes for userspace PRNG seeding.
37347 */
37348 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
37349- u_rand_bytes = (elf_addr_t __user *)
37350- STACK_ALLOC(p, sizeof(k_rand_bytes));
37351+ srandom32(k_rand_bytes[0] ^ random32());
37352+ srandom32(k_rand_bytes[1] ^ random32());
37353+ srandom32(k_rand_bytes[2] ^ random32());
37354+ srandom32(k_rand_bytes[3] ^ random32());
37355+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
37356+ u_rand_bytes = (elf_addr_t __user *) p;
37357 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
37358 return -EFAULT;
37359
37360@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
37361 return -EFAULT;
37362 current->mm->env_end = p;
37363
37364+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
37365+
37366 /* Put the elf_info on the stack in the right place. */
37367 sp = (elf_addr_t __user *)envp + 1;
37368- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
37369+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
37370 return -EFAULT;
37371 return 0;
37372 }
37373@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
37374 {
37375 struct elf_phdr *elf_phdata;
37376 struct elf_phdr *eppnt;
37377- unsigned long load_addr = 0;
37378+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
37379 int load_addr_set = 0;
37380 unsigned long last_bss = 0, elf_bss = 0;
37381- unsigned long error = ~0UL;
37382+ unsigned long error = -EINVAL;
37383 unsigned long total_size;
37384 int retval, i, size;
37385
37386@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
37387 goto out_close;
37388 }
37389
37390+#ifdef CONFIG_PAX_SEGMEXEC
37391+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
37392+ pax_task_size = SEGMEXEC_TASK_SIZE;
37393+#endif
37394+
37395 eppnt = elf_phdata;
37396 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
37397 if (eppnt->p_type == PT_LOAD) {
37398@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
37399 k = load_addr + eppnt->p_vaddr;
37400 if (BAD_ADDR(k) ||
37401 eppnt->p_filesz > eppnt->p_memsz ||
37402- eppnt->p_memsz > TASK_SIZE ||
37403- TASK_SIZE - eppnt->p_memsz < k) {
37404+ eppnt->p_memsz > pax_task_size ||
37405+ pax_task_size - eppnt->p_memsz < k) {
37406 error = -ENOMEM;
37407 goto out_close;
37408 }
37409@@ -528,6 +553,193 @@ out:
37410 return error;
37411 }
37412
37413+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
37414+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
37415+{
37416+ unsigned long pax_flags = 0UL;
37417+
37418+#ifdef CONFIG_PAX_PAGEEXEC
37419+ if (elf_phdata->p_flags & PF_PAGEEXEC)
37420+ pax_flags |= MF_PAX_PAGEEXEC;
37421+#endif
37422+
37423+#ifdef CONFIG_PAX_SEGMEXEC
37424+ if (elf_phdata->p_flags & PF_SEGMEXEC)
37425+ pax_flags |= MF_PAX_SEGMEXEC;
37426+#endif
37427+
37428+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37429+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37430+ if ((__supported_pte_mask & _PAGE_NX))
37431+ pax_flags &= ~MF_PAX_SEGMEXEC;
37432+ else
37433+ pax_flags &= ~MF_PAX_PAGEEXEC;
37434+ }
37435+#endif
37436+
37437+#ifdef CONFIG_PAX_EMUTRAMP
37438+ if (elf_phdata->p_flags & PF_EMUTRAMP)
37439+ pax_flags |= MF_PAX_EMUTRAMP;
37440+#endif
37441+
37442+#ifdef CONFIG_PAX_MPROTECT
37443+ if (elf_phdata->p_flags & PF_MPROTECT)
37444+ pax_flags |= MF_PAX_MPROTECT;
37445+#endif
37446+
37447+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
37448+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
37449+ pax_flags |= MF_PAX_RANDMMAP;
37450+#endif
37451+
37452+ return pax_flags;
37453+}
37454+#endif
37455+
37456+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37457+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
37458+{
37459+ unsigned long pax_flags = 0UL;
37460+
37461+#ifdef CONFIG_PAX_PAGEEXEC
37462+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
37463+ pax_flags |= MF_PAX_PAGEEXEC;
37464+#endif
37465+
37466+#ifdef CONFIG_PAX_SEGMEXEC
37467+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
37468+ pax_flags |= MF_PAX_SEGMEXEC;
37469+#endif
37470+
37471+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37472+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37473+ if ((__supported_pte_mask & _PAGE_NX))
37474+ pax_flags &= ~MF_PAX_SEGMEXEC;
37475+ else
37476+ pax_flags &= ~MF_PAX_PAGEEXEC;
37477+ }
37478+#endif
37479+
37480+#ifdef CONFIG_PAX_EMUTRAMP
37481+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
37482+ pax_flags |= MF_PAX_EMUTRAMP;
37483+#endif
37484+
37485+#ifdef CONFIG_PAX_MPROTECT
37486+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
37487+ pax_flags |= MF_PAX_MPROTECT;
37488+#endif
37489+
37490+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
37491+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
37492+ pax_flags |= MF_PAX_RANDMMAP;
37493+#endif
37494+
37495+ return pax_flags;
37496+}
37497+#endif
37498+
37499+#ifdef CONFIG_PAX_EI_PAX
37500+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
37501+{
37502+ unsigned long pax_flags = 0UL;
37503+
37504+#ifdef CONFIG_PAX_PAGEEXEC
37505+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
37506+ pax_flags |= MF_PAX_PAGEEXEC;
37507+#endif
37508+
37509+#ifdef CONFIG_PAX_SEGMEXEC
37510+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
37511+ pax_flags |= MF_PAX_SEGMEXEC;
37512+#endif
37513+
37514+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37515+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37516+ if ((__supported_pte_mask & _PAGE_NX))
37517+ pax_flags &= ~MF_PAX_SEGMEXEC;
37518+ else
37519+ pax_flags &= ~MF_PAX_PAGEEXEC;
37520+ }
37521+#endif
37522+
37523+#ifdef CONFIG_PAX_EMUTRAMP
37524+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
37525+ pax_flags |= MF_PAX_EMUTRAMP;
37526+#endif
37527+
37528+#ifdef CONFIG_PAX_MPROTECT
37529+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
37530+ pax_flags |= MF_PAX_MPROTECT;
37531+#endif
37532+
37533+#ifdef CONFIG_PAX_ASLR
37534+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
37535+ pax_flags |= MF_PAX_RANDMMAP;
37536+#endif
37537+
37538+ return pax_flags;
37539+}
37540+#endif
37541+
37542+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37543+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
37544+{
37545+ unsigned long pax_flags = 0UL;
37546+
37547+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37548+ unsigned long i;
37549+ int found_flags = 0;
37550+#endif
37551+
37552+#ifdef CONFIG_PAX_EI_PAX
37553+ pax_flags = pax_parse_ei_pax(elf_ex);
37554+#endif
37555+
37556+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37557+ for (i = 0UL; i < elf_ex->e_phnum; i++)
37558+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37559+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37560+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37561+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37562+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37563+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37564+ return -EINVAL;
37565+
37566+#ifdef CONFIG_PAX_SOFTMODE
37567+ if (pax_softmode)
37568+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
37569+ else
37570+#endif
37571+
37572+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37573+ found_flags = 1;
37574+ break;
37575+ }
37576+#endif
37577+
37578+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37579+ if (found_flags == 0) {
37580+ struct elf_phdr phdr;
37581+ memset(&phdr, 0, sizeof(phdr));
37582+ phdr.p_flags = PF_NOEMUTRAMP;
37583+#ifdef CONFIG_PAX_SOFTMODE
37584+ if (pax_softmode)
37585+ pax_flags = pax_parse_softmode(&phdr);
37586+ else
37587+#endif
37588+ pax_flags = pax_parse_hardmode(&phdr);
37589+ }
37590+#endif
37591+
37592+ if (0 > pax_check_flags(&pax_flags))
37593+ return -EINVAL;
37594+
37595+ current->mm->pax_flags = pax_flags;
37596+ return 0;
37597+}
37598+#endif
37599+
37600 /*
37601 * These are the functions used to load ELF style executables and shared
37602 * libraries. There is no binary dependent code anywhere else.
37603@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
37604 {
37605 unsigned int random_variable = 0;
37606
37607+#ifdef CONFIG_PAX_RANDUSTACK
37608+ if (randomize_va_space)
37609+ return stack_top - current->mm->delta_stack;
37610+#endif
37611+
37612 if ((current->flags & PF_RANDOMIZE) &&
37613 !(current->personality & ADDR_NO_RANDOMIZE)) {
37614 random_variable = get_random_int() & STACK_RND_MASK;
37615@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
37616 unsigned long load_addr = 0, load_bias = 0;
37617 int load_addr_set = 0;
37618 char * elf_interpreter = NULL;
37619- unsigned long error;
37620+ unsigned long error = 0;
37621 struct elf_phdr *elf_ppnt, *elf_phdata;
37622 unsigned long elf_bss, elf_brk;
37623 int retval, i;
37624@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
37625 unsigned long start_code, end_code, start_data, end_data;
37626 unsigned long reloc_func_desc __maybe_unused = 0;
37627 int executable_stack = EXSTACK_DEFAULT;
37628- unsigned long def_flags = 0;
37629 struct {
37630 struct elfhdr elf_ex;
37631 struct elfhdr interp_elf_ex;
37632 } *loc;
37633+ unsigned long pax_task_size = TASK_SIZE;
37634
37635 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37636 if (!loc) {
37637@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
37638
37639 /* OK, This is the point of no return */
37640 current->flags &= ~PF_FORKNOEXEC;
37641- current->mm->def_flags = def_flags;
37642+
37643+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37644+ current->mm->pax_flags = 0UL;
37645+#endif
37646+
37647+#ifdef CONFIG_PAX_DLRESOLVE
37648+ current->mm->call_dl_resolve = 0UL;
37649+#endif
37650+
37651+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37652+ current->mm->call_syscall = 0UL;
37653+#endif
37654+
37655+#ifdef CONFIG_PAX_ASLR
37656+ current->mm->delta_mmap = 0UL;
37657+ current->mm->delta_stack = 0UL;
37658+#endif
37659+
37660+ current->mm->def_flags = 0;
37661+
37662+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37663+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37664+ send_sig(SIGKILL, current, 0);
37665+ goto out_free_dentry;
37666+ }
37667+#endif
37668+
37669+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37670+ pax_set_initial_flags(bprm);
37671+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37672+ if (pax_set_initial_flags_func)
37673+ (pax_set_initial_flags_func)(bprm);
37674+#endif
37675+
37676+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37677+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
37678+ current->mm->context.user_cs_limit = PAGE_SIZE;
37679+ current->mm->def_flags |= VM_PAGEEXEC;
37680+ }
37681+#endif
37682+
37683+#ifdef CONFIG_PAX_SEGMEXEC
37684+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37685+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37686+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37687+ pax_task_size = SEGMEXEC_TASK_SIZE;
37688+ current->mm->def_flags |= VM_NOHUGEPAGE;
37689+ }
37690+#endif
37691+
37692+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37693+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37694+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37695+ put_cpu();
37696+ }
37697+#endif
37698
37699 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37700 may depend on the personality. */
37701 SET_PERSONALITY(loc->elf_ex);
37702+
37703+#ifdef CONFIG_PAX_ASLR
37704+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37705+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37706+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37707+ }
37708+#endif
37709+
37710+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37711+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37712+ executable_stack = EXSTACK_DISABLE_X;
37713+ current->personality &= ~READ_IMPLIES_EXEC;
37714+ } else
37715+#endif
37716+
37717 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37718 current->personality |= READ_IMPLIES_EXEC;
37719
37720@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
37721 #else
37722 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37723 #endif
37724+
37725+#ifdef CONFIG_PAX_RANDMMAP
37726+ /* PaX: randomize base address at the default exe base if requested */
37727+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37728+#ifdef CONFIG_SPARC64
37729+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37730+#else
37731+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37732+#endif
37733+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37734+ elf_flags |= MAP_FIXED;
37735+ }
37736+#endif
37737+
37738 }
37739
37740 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37741@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
37742 * allowed task size. Note that p_filesz must always be
37743 * <= p_memsz so it is only necessary to check p_memsz.
37744 */
37745- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37746- elf_ppnt->p_memsz > TASK_SIZE ||
37747- TASK_SIZE - elf_ppnt->p_memsz < k) {
37748+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37749+ elf_ppnt->p_memsz > pax_task_size ||
37750+ pax_task_size - elf_ppnt->p_memsz < k) {
37751 /* set_brk can never work. Avoid overflows. */
37752 send_sig(SIGKILL, current, 0);
37753 retval = -EINVAL;
37754@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
37755 start_data += load_bias;
37756 end_data += load_bias;
37757
37758+#ifdef CONFIG_PAX_RANDMMAP
37759+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37760+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37761+#endif
37762+
37763 /* Calling set_brk effectively mmaps the pages that we need
37764 * for the bss and break sections. We must do this before
37765 * mapping in the interpreter, to make sure it doesn't wind
37766@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
37767 goto out_free_dentry;
37768 }
37769 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37770- send_sig(SIGSEGV, current, 0);
37771- retval = -EFAULT; /* Nobody gets to see this, but.. */
37772- goto out_free_dentry;
37773+ /*
37774+ * This bss-zeroing can fail if the ELF
37775+ * file specifies odd protections. So
37776+ * we don't check the return value
37777+ */
37778 }
37779
37780 if (elf_interpreter) {
37781@@ -1090,7 +1398,7 @@ out:
37782 * Decide what to dump of a segment, part, all or none.
37783 */
37784 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37785- unsigned long mm_flags)
37786+ unsigned long mm_flags, long signr)
37787 {
37788 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37789
37790@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
37791 if (vma->vm_file == NULL)
37792 return 0;
37793
37794- if (FILTER(MAPPED_PRIVATE))
37795+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37796 goto whole;
37797
37798 /*
37799@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
37800 {
37801 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37802 int i = 0;
37803- do
37804+ do {
37805 i += 2;
37806- while (auxv[i - 2] != AT_NULL);
37807+ } while (auxv[i - 2] != AT_NULL);
37808 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37809 }
37810
37811@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
37812 }
37813
37814 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
37815- unsigned long mm_flags)
37816+ struct coredump_params *cprm)
37817 {
37818 struct vm_area_struct *vma;
37819 size_t size = 0;
37820
37821 for (vma = first_vma(current, gate_vma); vma != NULL;
37822 vma = next_vma(vma, gate_vma))
37823- size += vma_dump_size(vma, mm_flags);
37824+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37825 return size;
37826 }
37827
37828@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
37829
37830 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
37831
37832- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
37833+ offset += elf_core_vma_data_size(gate_vma, cprm);
37834 offset += elf_core_extra_data_size();
37835 e_shoff = offset;
37836
37837@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
37838 offset = dataoff;
37839
37840 size += sizeof(*elf);
37841+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37842 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
37843 goto end_coredump;
37844
37845 size += sizeof(*phdr4note);
37846+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37847 if (size > cprm->limit
37848 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
37849 goto end_coredump;
37850@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
37851 phdr.p_offset = offset;
37852 phdr.p_vaddr = vma->vm_start;
37853 phdr.p_paddr = 0;
37854- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
37855+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37856 phdr.p_memsz = vma->vm_end - vma->vm_start;
37857 offset += phdr.p_filesz;
37858 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37859@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
37860 phdr.p_align = ELF_EXEC_PAGESIZE;
37861
37862 size += sizeof(phdr);
37863+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37864 if (size > cprm->limit
37865 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
37866 goto end_coredump;
37867@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
37868 unsigned long addr;
37869 unsigned long end;
37870
37871- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
37872+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37873
37874 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37875 struct page *page;
37876@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
37877 page = get_dump_page(addr);
37878 if (page) {
37879 void *kaddr = kmap(page);
37880+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37881 stop = ((size += PAGE_SIZE) > cprm->limit) ||
37882 !dump_write(cprm->file, kaddr,
37883 PAGE_SIZE);
37884@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
37885
37886 if (e_phnum == PN_XNUM) {
37887 size += sizeof(*shdr4extnum);
37888+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37889 if (size > cprm->limit
37890 || !dump_write(cprm->file, shdr4extnum,
37891 sizeof(*shdr4extnum)))
37892@@ -2067,6 +2380,97 @@ out:
37893
37894 #endif /* CONFIG_ELF_CORE */
37895
37896+#ifdef CONFIG_PAX_MPROTECT
37897+/* PaX: non-PIC ELF libraries need relocations on their executable segments
37898+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37899+ * we'll remove VM_MAYWRITE for good on RELRO segments.
37900+ *
37901+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37902+ * basis because we want to allow the common case and not the special ones.
37903+ */
37904+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37905+{
37906+ struct elfhdr elf_h;
37907+ struct elf_phdr elf_p;
37908+ unsigned long i;
37909+ unsigned long oldflags;
37910+ bool is_textrel_rw, is_textrel_rx, is_relro;
37911+
37912+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37913+ return;
37914+
37915+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37916+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37917+
37918+#ifdef CONFIG_PAX_ELFRELOCS
37919+ /* possible TEXTREL */
37920+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37921+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37922+#else
37923+ is_textrel_rw = false;
37924+ is_textrel_rx = false;
37925+#endif
37926+
37927+ /* possible RELRO */
37928+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37929+
37930+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37931+ return;
37932+
37933+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37934+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37935+
37936+#ifdef CONFIG_PAX_ETEXECRELOCS
37937+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37938+#else
37939+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37940+#endif
37941+
37942+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37943+ !elf_check_arch(&elf_h) ||
37944+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37945+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37946+ return;
37947+
37948+ for (i = 0UL; i < elf_h.e_phnum; i++) {
37949+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37950+ return;
37951+ switch (elf_p.p_type) {
37952+ case PT_DYNAMIC:
37953+ if (!is_textrel_rw && !is_textrel_rx)
37954+ continue;
37955+ i = 0UL;
37956+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37957+ elf_dyn dyn;
37958+
37959+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37960+ return;
37961+ if (dyn.d_tag == DT_NULL)
37962+ return;
37963+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37964+ gr_log_textrel(vma);
37965+ if (is_textrel_rw)
37966+ vma->vm_flags |= VM_MAYWRITE;
37967+ else
37968+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37969+ vma->vm_flags &= ~VM_MAYWRITE;
37970+ return;
37971+ }
37972+ i++;
37973+ }
37974+ return;
37975+
37976+ case PT_GNU_RELRO:
37977+ if (!is_relro)
37978+ continue;
37979+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37980+ vma->vm_flags &= ~VM_MAYWRITE;
37981+ return;
37982+ }
37983+ }
37984+}
37985+#endif
37986+
37987 static int __init init_elf_binfmt(void)
37988 {
37989 return register_binfmt(&elf_format);
37990diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
37991--- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
37992+++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
37993@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
37994 realdatastart = (unsigned long) -ENOMEM;
37995 printk("Unable to allocate RAM for process data, errno %d\n",
37996 (int)-realdatastart);
37997+ down_write(&current->mm->mmap_sem);
37998 do_munmap(current->mm, textpos, text_len);
37999+ up_write(&current->mm->mmap_sem);
38000 ret = realdatastart;
38001 goto err;
38002 }
38003@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
38004 }
38005 if (IS_ERR_VALUE(result)) {
38006 printk("Unable to read data+bss, errno %d\n", (int)-result);
38007+ down_write(&current->mm->mmap_sem);
38008 do_munmap(current->mm, textpos, text_len);
38009 do_munmap(current->mm, realdatastart, len);
38010+ up_write(&current->mm->mmap_sem);
38011 ret = result;
38012 goto err;
38013 }
38014@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
38015 }
38016 if (IS_ERR_VALUE(result)) {
38017 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
38018+ down_write(&current->mm->mmap_sem);
38019 do_munmap(current->mm, textpos, text_len + data_len + extra +
38020 MAX_SHARED_LIBS * sizeof(unsigned long));
38021+ up_write(&current->mm->mmap_sem);
38022 ret = result;
38023 goto err;
38024 }
38025diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
38026--- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
38027+++ linux-3.0.4/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
38028@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
38029 const int read = bio_data_dir(bio) == READ;
38030 struct bio_map_data *bmd = bio->bi_private;
38031 int i;
38032- char *p = bmd->sgvecs[0].iov_base;
38033+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
38034
38035 __bio_for_each_segment(bvec, bio, i, 0) {
38036 char *addr = page_address(bvec->bv_page);
38037diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
38038--- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
38039+++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
38040@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
38041 else if (bdev->bd_contains == bdev)
38042 return true; /* is a whole device which isn't held */
38043
38044- else if (whole->bd_holder == bd_may_claim)
38045+ else if (whole->bd_holder == (void *)bd_may_claim)
38046 return true; /* is a partition of a device that is being partitioned */
38047 else if (whole->bd_holder != NULL)
38048 return false; /* is a partition of a held device */
38049diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
38050--- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
38051+++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
38052@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
38053 free_extent_buffer(buf);
38054 add_root_to_dirty_list(root);
38055 } else {
38056- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
38057- parent_start = parent->start;
38058- else
38059+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
38060+ if (parent)
38061+ parent_start = parent->start;
38062+ else
38063+ parent_start = 0;
38064+ } else
38065 parent_start = 0;
38066
38067 WARN_ON(trans->transid != btrfs_header_generation(parent));
38068diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
38069--- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38070+++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38071@@ -6895,7 +6895,7 @@ fail:
38072 return -ENOMEM;
38073 }
38074
38075-static int btrfs_getattr(struct vfsmount *mnt,
38076+int btrfs_getattr(struct vfsmount *mnt,
38077 struct dentry *dentry, struct kstat *stat)
38078 {
38079 struct inode *inode = dentry->d_inode;
38080@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
38081 return 0;
38082 }
38083
38084+EXPORT_SYMBOL(btrfs_getattr);
38085+
38086+dev_t get_btrfs_dev_from_inode(struct inode *inode)
38087+{
38088+ return BTRFS_I(inode)->root->anon_super.s_dev;
38089+}
38090+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
38091+
38092 /*
38093 * If a file is moved, it will inherit the cow and compression flags of the new
38094 * directory.
38095diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
38096--- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38097+++ linux-3.0.4/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
38098@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
38099 for (i = 0; i < num_types; i++) {
38100 struct btrfs_space_info *tmp;
38101
38102+ /* Don't copy in more than we allocated */
38103 if (!slot_count)
38104 break;
38105
38106+ slot_count--;
38107+
38108 info = NULL;
38109 rcu_read_lock();
38110 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
38111@@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
38112 memcpy(dest, &space, sizeof(space));
38113 dest++;
38114 space_args.total_spaces++;
38115- slot_count--;
38116 }
38117- if (!slot_count)
38118- break;
38119 }
38120 up_read(&info->groups_sem);
38121 }
38122diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
38123--- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
38124+++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
38125@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
38126 }
38127 spin_unlock(&rc->reloc_root_tree.lock);
38128
38129- BUG_ON((struct btrfs_root *)node->data != root);
38130+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
38131
38132 if (!del) {
38133 spin_lock(&rc->reloc_root_tree.lock);
38134diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
38135--- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
38136+++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
38137@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
38138 args);
38139
38140 /* start by checking things over */
38141- ASSERT(cache->fstop_percent >= 0 &&
38142- cache->fstop_percent < cache->fcull_percent &&
38143+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
38144 cache->fcull_percent < cache->frun_percent &&
38145 cache->frun_percent < 100);
38146
38147- ASSERT(cache->bstop_percent >= 0 &&
38148- cache->bstop_percent < cache->bcull_percent &&
38149+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
38150 cache->bcull_percent < cache->brun_percent &&
38151 cache->brun_percent < 100);
38152
38153diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
38154--- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
38155+++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
38156@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
38157 if (n > buflen)
38158 return -EMSGSIZE;
38159
38160- if (copy_to_user(_buffer, buffer, n) != 0)
38161+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
38162 return -EFAULT;
38163
38164 return n;
38165@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
38166 if (test_bit(CACHEFILES_DEAD, &cache->flags))
38167 return -EIO;
38168
38169- if (datalen < 0 || datalen > PAGE_SIZE - 1)
38170+ if (datalen > PAGE_SIZE - 1)
38171 return -EOPNOTSUPP;
38172
38173 /* drag the command string into the kernel so we can parse it */
38174@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
38175 if (args[0] != '%' || args[1] != '\0')
38176 return -EINVAL;
38177
38178- if (fstop < 0 || fstop >= cache->fcull_percent)
38179+ if (fstop >= cache->fcull_percent)
38180 return cachefiles_daemon_range_error(cache, args);
38181
38182 cache->fstop_percent = fstop;
38183@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
38184 if (args[0] != '%' || args[1] != '\0')
38185 return -EINVAL;
38186
38187- if (bstop < 0 || bstop >= cache->bcull_percent)
38188+ if (bstop >= cache->bcull_percent)
38189 return cachefiles_daemon_range_error(cache, args);
38190
38191 cache->bstop_percent = bstop;
38192diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
38193--- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
38194+++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
38195@@ -57,7 +57,7 @@ struct cachefiles_cache {
38196 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
38197 struct rb_root active_nodes; /* active nodes (can't be culled) */
38198 rwlock_t active_lock; /* lock for active_nodes */
38199- atomic_t gravecounter; /* graveyard uniquifier */
38200+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
38201 unsigned frun_percent; /* when to stop culling (% files) */
38202 unsigned fcull_percent; /* when to start culling (% files) */
38203 unsigned fstop_percent; /* when to stop allocating (% files) */
38204@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
38205 * proc.c
38206 */
38207 #ifdef CONFIG_CACHEFILES_HISTOGRAM
38208-extern atomic_t cachefiles_lookup_histogram[HZ];
38209-extern atomic_t cachefiles_mkdir_histogram[HZ];
38210-extern atomic_t cachefiles_create_histogram[HZ];
38211+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
38212+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
38213+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
38214
38215 extern int __init cachefiles_proc_init(void);
38216 extern void cachefiles_proc_cleanup(void);
38217 static inline
38218-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
38219+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
38220 {
38221 unsigned long jif = jiffies - start_jif;
38222 if (jif >= HZ)
38223 jif = HZ - 1;
38224- atomic_inc(&histogram[jif]);
38225+ atomic_inc_unchecked(&histogram[jif]);
38226 }
38227
38228 #else
38229diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
38230--- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
38231+++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
38232@@ -318,7 +318,7 @@ try_again:
38233 /* first step is to make up a grave dentry in the graveyard */
38234 sprintf(nbuffer, "%08x%08x",
38235 (uint32_t) get_seconds(),
38236- (uint32_t) atomic_inc_return(&cache->gravecounter));
38237+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
38238
38239 /* do the multiway lock magic */
38240 trap = lock_rename(cache->graveyard, dir);
38241diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
38242--- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
38243+++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
38244@@ -14,9 +14,9 @@
38245 #include <linux/seq_file.h>
38246 #include "internal.h"
38247
38248-atomic_t cachefiles_lookup_histogram[HZ];
38249-atomic_t cachefiles_mkdir_histogram[HZ];
38250-atomic_t cachefiles_create_histogram[HZ];
38251+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
38252+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
38253+atomic_unchecked_t cachefiles_create_histogram[HZ];
38254
38255 /*
38256 * display the latency histogram
38257@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
38258 return 0;
38259 default:
38260 index = (unsigned long) v - 3;
38261- x = atomic_read(&cachefiles_lookup_histogram[index]);
38262- y = atomic_read(&cachefiles_mkdir_histogram[index]);
38263- z = atomic_read(&cachefiles_create_histogram[index]);
38264+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
38265+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
38266+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
38267 if (x == 0 && y == 0 && z == 0)
38268 return 0;
38269
38270diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
38271--- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
38272+++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
38273@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
38274 old_fs = get_fs();
38275 set_fs(KERNEL_DS);
38276 ret = file->f_op->write(
38277- file, (const void __user *) data, len, &pos);
38278+ file, (__force const void __user *) data, len, &pos);
38279 set_fs(old_fs);
38280 kunmap(page);
38281 if (ret != len)
38282diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
38283--- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
38284+++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
38285@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
38286 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
38287 struct ceph_mds_client *mdsc = fsc->mdsc;
38288 unsigned frag = fpos_frag(filp->f_pos);
38289- int off = fpos_off(filp->f_pos);
38290+ unsigned int off = fpos_off(filp->f_pos);
38291 int err;
38292 u32 ftype;
38293 struct ceph_mds_reply_info_parsed *rinfo;
38294diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
38295--- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
38296+++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
38297@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
38298
38299 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
38300 #ifdef CONFIG_CIFS_STATS2
38301- atomic_set(&totBufAllocCount, 0);
38302- atomic_set(&totSmBufAllocCount, 0);
38303+ atomic_set_unchecked(&totBufAllocCount, 0);
38304+ atomic_set_unchecked(&totSmBufAllocCount, 0);
38305 #endif /* CONFIG_CIFS_STATS2 */
38306 spin_lock(&cifs_tcp_ses_lock);
38307 list_for_each(tmp1, &cifs_tcp_ses_list) {
38308@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
38309 tcon = list_entry(tmp3,
38310 struct cifs_tcon,
38311 tcon_list);
38312- atomic_set(&tcon->num_smbs_sent, 0);
38313- atomic_set(&tcon->num_writes, 0);
38314- atomic_set(&tcon->num_reads, 0);
38315- atomic_set(&tcon->num_oplock_brks, 0);
38316- atomic_set(&tcon->num_opens, 0);
38317- atomic_set(&tcon->num_posixopens, 0);
38318- atomic_set(&tcon->num_posixmkdirs, 0);
38319- atomic_set(&tcon->num_closes, 0);
38320- atomic_set(&tcon->num_deletes, 0);
38321- atomic_set(&tcon->num_mkdirs, 0);
38322- atomic_set(&tcon->num_rmdirs, 0);
38323- atomic_set(&tcon->num_renames, 0);
38324- atomic_set(&tcon->num_t2renames, 0);
38325- atomic_set(&tcon->num_ffirst, 0);
38326- atomic_set(&tcon->num_fnext, 0);
38327- atomic_set(&tcon->num_fclose, 0);
38328- atomic_set(&tcon->num_hardlinks, 0);
38329- atomic_set(&tcon->num_symlinks, 0);
38330- atomic_set(&tcon->num_locks, 0);
38331+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
38332+ atomic_set_unchecked(&tcon->num_writes, 0);
38333+ atomic_set_unchecked(&tcon->num_reads, 0);
38334+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
38335+ atomic_set_unchecked(&tcon->num_opens, 0);
38336+ atomic_set_unchecked(&tcon->num_posixopens, 0);
38337+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
38338+ atomic_set_unchecked(&tcon->num_closes, 0);
38339+ atomic_set_unchecked(&tcon->num_deletes, 0);
38340+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
38341+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
38342+ atomic_set_unchecked(&tcon->num_renames, 0);
38343+ atomic_set_unchecked(&tcon->num_t2renames, 0);
38344+ atomic_set_unchecked(&tcon->num_ffirst, 0);
38345+ atomic_set_unchecked(&tcon->num_fnext, 0);
38346+ atomic_set_unchecked(&tcon->num_fclose, 0);
38347+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
38348+ atomic_set_unchecked(&tcon->num_symlinks, 0);
38349+ atomic_set_unchecked(&tcon->num_locks, 0);
38350 }
38351 }
38352 }
38353@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
38354 smBufAllocCount.counter, cifs_min_small);
38355 #ifdef CONFIG_CIFS_STATS2
38356 seq_printf(m, "Total Large %d Small %d Allocations\n",
38357- atomic_read(&totBufAllocCount),
38358- atomic_read(&totSmBufAllocCount));
38359+ atomic_read_unchecked(&totBufAllocCount),
38360+ atomic_read_unchecked(&totSmBufAllocCount));
38361 #endif /* CONFIG_CIFS_STATS2 */
38362
38363 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
38364@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
38365 if (tcon->need_reconnect)
38366 seq_puts(m, "\tDISCONNECTED ");
38367 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
38368- atomic_read(&tcon->num_smbs_sent),
38369- atomic_read(&tcon->num_oplock_brks));
38370+ atomic_read_unchecked(&tcon->num_smbs_sent),
38371+ atomic_read_unchecked(&tcon->num_oplock_brks));
38372 seq_printf(m, "\nReads: %d Bytes: %lld",
38373- atomic_read(&tcon->num_reads),
38374+ atomic_read_unchecked(&tcon->num_reads),
38375 (long long)(tcon->bytes_read));
38376 seq_printf(m, "\nWrites: %d Bytes: %lld",
38377- atomic_read(&tcon->num_writes),
38378+ atomic_read_unchecked(&tcon->num_writes),
38379 (long long)(tcon->bytes_written));
38380 seq_printf(m, "\nFlushes: %d",
38381- atomic_read(&tcon->num_flushes));
38382+ atomic_read_unchecked(&tcon->num_flushes));
38383 seq_printf(m, "\nLocks: %d HardLinks: %d "
38384 "Symlinks: %d",
38385- atomic_read(&tcon->num_locks),
38386- atomic_read(&tcon->num_hardlinks),
38387- atomic_read(&tcon->num_symlinks));
38388+ atomic_read_unchecked(&tcon->num_locks),
38389+ atomic_read_unchecked(&tcon->num_hardlinks),
38390+ atomic_read_unchecked(&tcon->num_symlinks));
38391 seq_printf(m, "\nOpens: %d Closes: %d "
38392 "Deletes: %d",
38393- atomic_read(&tcon->num_opens),
38394- atomic_read(&tcon->num_closes),
38395- atomic_read(&tcon->num_deletes));
38396+ atomic_read_unchecked(&tcon->num_opens),
38397+ atomic_read_unchecked(&tcon->num_closes),
38398+ atomic_read_unchecked(&tcon->num_deletes));
38399 seq_printf(m, "\nPosix Opens: %d "
38400 "Posix Mkdirs: %d",
38401- atomic_read(&tcon->num_posixopens),
38402- atomic_read(&tcon->num_posixmkdirs));
38403+ atomic_read_unchecked(&tcon->num_posixopens),
38404+ atomic_read_unchecked(&tcon->num_posixmkdirs));
38405 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
38406- atomic_read(&tcon->num_mkdirs),
38407- atomic_read(&tcon->num_rmdirs));
38408+ atomic_read_unchecked(&tcon->num_mkdirs),
38409+ atomic_read_unchecked(&tcon->num_rmdirs));
38410 seq_printf(m, "\nRenames: %d T2 Renames %d",
38411- atomic_read(&tcon->num_renames),
38412- atomic_read(&tcon->num_t2renames));
38413+ atomic_read_unchecked(&tcon->num_renames),
38414+ atomic_read_unchecked(&tcon->num_t2renames));
38415 seq_printf(m, "\nFindFirst: %d FNext %d "
38416 "FClose %d",
38417- atomic_read(&tcon->num_ffirst),
38418- atomic_read(&tcon->num_fnext),
38419- atomic_read(&tcon->num_fclose));
38420+ atomic_read_unchecked(&tcon->num_ffirst),
38421+ atomic_read_unchecked(&tcon->num_fnext),
38422+ atomic_read_unchecked(&tcon->num_fclose));
38423 }
38424 }
38425 }
38426diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
38427--- linux-3.0.4/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
38428+++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
38429@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
38430 cifs_req_cachep = kmem_cache_create("cifs_request",
38431 CIFSMaxBufSize +
38432 MAX_CIFS_HDR_SIZE, 0,
38433- SLAB_HWCACHE_ALIGN, NULL);
38434+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
38435 if (cifs_req_cachep == NULL)
38436 return -ENOMEM;
38437
38438@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
38439 efficient to alloc 1 per page off the slab compared to 17K (5page)
38440 alloc of large cifs buffers even when page debugging is on */
38441 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
38442- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
38443+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
38444 NULL);
38445 if (cifs_sm_req_cachep == NULL) {
38446 mempool_destroy(cifs_req_poolp);
38447@@ -1106,8 +1106,8 @@ init_cifs(void)
38448 atomic_set(&bufAllocCount, 0);
38449 atomic_set(&smBufAllocCount, 0);
38450 #ifdef CONFIG_CIFS_STATS2
38451- atomic_set(&totBufAllocCount, 0);
38452- atomic_set(&totSmBufAllocCount, 0);
38453+ atomic_set_unchecked(&totBufAllocCount, 0);
38454+ atomic_set_unchecked(&totSmBufAllocCount, 0);
38455 #endif /* CONFIG_CIFS_STATS2 */
38456
38457 atomic_set(&midCount, 0);
38458diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
38459--- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
38460+++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
38461@@ -381,28 +381,28 @@ struct cifs_tcon {
38462 __u16 Flags; /* optional support bits */
38463 enum statusEnum tidStatus;
38464 #ifdef CONFIG_CIFS_STATS
38465- atomic_t num_smbs_sent;
38466- atomic_t num_writes;
38467- atomic_t num_reads;
38468- atomic_t num_flushes;
38469- atomic_t num_oplock_brks;
38470- atomic_t num_opens;
38471- atomic_t num_closes;
38472- atomic_t num_deletes;
38473- atomic_t num_mkdirs;
38474- atomic_t num_posixopens;
38475- atomic_t num_posixmkdirs;
38476- atomic_t num_rmdirs;
38477- atomic_t num_renames;
38478- atomic_t num_t2renames;
38479- atomic_t num_ffirst;
38480- atomic_t num_fnext;
38481- atomic_t num_fclose;
38482- atomic_t num_hardlinks;
38483- atomic_t num_symlinks;
38484- atomic_t num_locks;
38485- atomic_t num_acl_get;
38486- atomic_t num_acl_set;
38487+ atomic_unchecked_t num_smbs_sent;
38488+ atomic_unchecked_t num_writes;
38489+ atomic_unchecked_t num_reads;
38490+ atomic_unchecked_t num_flushes;
38491+ atomic_unchecked_t num_oplock_brks;
38492+ atomic_unchecked_t num_opens;
38493+ atomic_unchecked_t num_closes;
38494+ atomic_unchecked_t num_deletes;
38495+ atomic_unchecked_t num_mkdirs;
38496+ atomic_unchecked_t num_posixopens;
38497+ atomic_unchecked_t num_posixmkdirs;
38498+ atomic_unchecked_t num_rmdirs;
38499+ atomic_unchecked_t num_renames;
38500+ atomic_unchecked_t num_t2renames;
38501+ atomic_unchecked_t num_ffirst;
38502+ atomic_unchecked_t num_fnext;
38503+ atomic_unchecked_t num_fclose;
38504+ atomic_unchecked_t num_hardlinks;
38505+ atomic_unchecked_t num_symlinks;
38506+ atomic_unchecked_t num_locks;
38507+ atomic_unchecked_t num_acl_get;
38508+ atomic_unchecked_t num_acl_set;
38509 #ifdef CONFIG_CIFS_STATS2
38510 unsigned long long time_writes;
38511 unsigned long long time_reads;
38512@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
38513 }
38514
38515 #ifdef CONFIG_CIFS_STATS
38516-#define cifs_stats_inc atomic_inc
38517+#define cifs_stats_inc atomic_inc_unchecked
38518
38519 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
38520 unsigned int bytes)
38521@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
38522 /* Various Debug counters */
38523 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
38524 #ifdef CONFIG_CIFS_STATS2
38525-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
38526-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
38527+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
38528+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
38529 #endif
38530 GLOBAL_EXTERN atomic_t smBufAllocCount;
38531 GLOBAL_EXTERN atomic_t midCount;
38532diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
38533--- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
38534+++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
38535@@ -587,7 +587,7 @@ symlink_exit:
38536
38537 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
38538 {
38539- char *p = nd_get_link(nd);
38540+ const char *p = nd_get_link(nd);
38541 if (!IS_ERR(p))
38542 kfree(p);
38543 }
38544diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
38545--- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
38546+++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
38547@@ -156,7 +156,7 @@ cifs_buf_get(void)
38548 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
38549 atomic_inc(&bufAllocCount);
38550 #ifdef CONFIG_CIFS_STATS2
38551- atomic_inc(&totBufAllocCount);
38552+ atomic_inc_unchecked(&totBufAllocCount);
38553 #endif /* CONFIG_CIFS_STATS2 */
38554 }
38555
38556@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
38557 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
38558 atomic_inc(&smBufAllocCount);
38559 #ifdef CONFIG_CIFS_STATS2
38560- atomic_inc(&totSmBufAllocCount);
38561+ atomic_inc_unchecked(&totSmBufAllocCount);
38562 #endif /* CONFIG_CIFS_STATS2 */
38563
38564 }
38565diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
38566--- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
38567+++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
38568@@ -24,7 +24,7 @@
38569 #include "coda_linux.h"
38570 #include "coda_cache.h"
38571
38572-static atomic_t permission_epoch = ATOMIC_INIT(0);
38573+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38574
38575 /* replace or extend an acl cache hit */
38576 void coda_cache_enter(struct inode *inode, int mask)
38577@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
38578 struct coda_inode_info *cii = ITOC(inode);
38579
38580 spin_lock(&cii->c_lock);
38581- cii->c_cached_epoch = atomic_read(&permission_epoch);
38582+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38583 if (cii->c_uid != current_fsuid()) {
38584 cii->c_uid = current_fsuid();
38585 cii->c_cached_perm = mask;
38586@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
38587 {
38588 struct coda_inode_info *cii = ITOC(inode);
38589 spin_lock(&cii->c_lock);
38590- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38591+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38592 spin_unlock(&cii->c_lock);
38593 }
38594
38595 /* remove all acl caches */
38596 void coda_cache_clear_all(struct super_block *sb)
38597 {
38598- atomic_inc(&permission_epoch);
38599+ atomic_inc_unchecked(&permission_epoch);
38600 }
38601
38602
38603@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
38604 spin_lock(&cii->c_lock);
38605 hit = (mask & cii->c_cached_perm) == mask &&
38606 cii->c_uid == current_fsuid() &&
38607- cii->c_cached_epoch == atomic_read(&permission_epoch);
38608+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38609 spin_unlock(&cii->c_lock);
38610
38611 return hit;
38612diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
38613--- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
38614+++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
38615@@ -30,11 +30,13 @@
38616 #undef elf_phdr
38617 #undef elf_shdr
38618 #undef elf_note
38619+#undef elf_dyn
38620 #undef elf_addr_t
38621 #define elfhdr elf32_hdr
38622 #define elf_phdr elf32_phdr
38623 #define elf_shdr elf32_shdr
38624 #define elf_note elf32_note
38625+#define elf_dyn Elf32_Dyn
38626 #define elf_addr_t Elf32_Addr
38627
38628 /*
38629diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
38630--- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
38631+++ linux-3.0.4/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
38632@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
38633 goto out;
38634
38635 ret = -EINVAL;
38636- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
38637+ if (nr_segs > UIO_MAXIOV)
38638 goto out;
38639 if (nr_segs > fast_segs) {
38640 ret = -ENOMEM;
38641@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
38642
38643 struct compat_readdir_callback {
38644 struct compat_old_linux_dirent __user *dirent;
38645+ struct file * file;
38646 int result;
38647 };
38648
38649@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
38650 buf->result = -EOVERFLOW;
38651 return -EOVERFLOW;
38652 }
38653+
38654+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38655+ return 0;
38656+
38657 buf->result++;
38658 dirent = buf->dirent;
38659 if (!access_ok(VERIFY_WRITE, dirent,
38660@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
38661
38662 buf.result = 0;
38663 buf.dirent = dirent;
38664+ buf.file = file;
38665
38666 error = vfs_readdir(file, compat_fillonedir, &buf);
38667 if (buf.result)
38668@@ -917,6 +923,7 @@ struct compat_linux_dirent {
38669 struct compat_getdents_callback {
38670 struct compat_linux_dirent __user *current_dir;
38671 struct compat_linux_dirent __user *previous;
38672+ struct file * file;
38673 int count;
38674 int error;
38675 };
38676@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
38677 buf->error = -EOVERFLOW;
38678 return -EOVERFLOW;
38679 }
38680+
38681+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38682+ return 0;
38683+
38684 dirent = buf->previous;
38685 if (dirent) {
38686 if (__put_user(offset, &dirent->d_off))
38687@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
38688 buf.previous = NULL;
38689 buf.count = count;
38690 buf.error = 0;
38691+ buf.file = file;
38692
38693 error = vfs_readdir(file, compat_filldir, &buf);
38694 if (error >= 0)
38695@@ -1006,6 +1018,7 @@ out:
38696 struct compat_getdents_callback64 {
38697 struct linux_dirent64 __user *current_dir;
38698 struct linux_dirent64 __user *previous;
38699+ struct file * file;
38700 int count;
38701 int error;
38702 };
38703@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
38704 buf->error = -EINVAL; /* only used if we fail.. */
38705 if (reclen > buf->count)
38706 return -EINVAL;
38707+
38708+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38709+ return 0;
38710+
38711 dirent = buf->previous;
38712
38713 if (dirent) {
38714@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
38715 buf.previous = NULL;
38716 buf.count = count;
38717 buf.error = 0;
38718+ buf.file = file;
38719
38720 error = vfs_readdir(file, compat_filldir64, &buf);
38721 if (error >= 0)
38722@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
38723 struct fdtable *fdt;
38724 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38725
38726+ pax_track_stack();
38727+
38728 if (n < 0)
38729 goto out_nofds;
38730
38731diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
38732--- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
38733+++ linux-3.0.4/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
38734@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
38735
38736 err = get_user(palp, &up->palette);
38737 err |= get_user(length, &up->length);
38738+ if (err)
38739+ return -EFAULT;
38740
38741 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38742 err = put_user(compat_ptr(palp), &up_native->palette);
38743@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
38744 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
38745 {
38746 unsigned int a, b;
38747- a = *(unsigned int *)p;
38748- b = *(unsigned int *)q;
38749+ a = *(const unsigned int *)p;
38750+ b = *(const unsigned int *)q;
38751 if (a > b)
38752 return 1;
38753 if (a < b)
38754diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
38755--- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38756+++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38757@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
38758 }
38759 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38760 struct configfs_dirent *next;
38761- const char * name;
38762+ const unsigned char * name;
38763+ char d_name[sizeof(next->s_dentry->d_iname)];
38764 int len;
38765 struct inode *inode = NULL;
38766
38767@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
38768 continue;
38769
38770 name = configfs_get_name(next);
38771- len = strlen(name);
38772+ if (next->s_dentry && name == next->s_dentry->d_iname) {
38773+ len = next->s_dentry->d_name.len;
38774+ memcpy(d_name, name, len);
38775+ name = d_name;
38776+ } else
38777+ len = strlen(name);
38778
38779 /*
38780 * We'll have a dentry and an inode for
38781diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
38782--- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
38783+++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
38784@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
38785 mempages -= reserve;
38786
38787 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38788- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38789+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38790
38791 dcache_init();
38792 inode_init();
38793diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
38794--- linux-3.0.4/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
38795+++ linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38796@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
38797 old_fs = get_fs();
38798 set_fs(get_ds());
38799 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38800- (char __user *)lower_buf,
38801+ (__force char __user *)lower_buf,
38802 lower_bufsiz);
38803 set_fs(old_fs);
38804 if (rc < 0)
38805@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
38806 }
38807 old_fs = get_fs();
38808 set_fs(get_ds());
38809- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38810+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38811 set_fs(old_fs);
38812 if (rc < 0) {
38813 kfree(buf);
38814@@ -765,7 +765,7 @@ out:
38815 static void
38816 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
38817 {
38818- char *buf = nd_get_link(nd);
38819+ const char *buf = nd_get_link(nd);
38820 if (!IS_ERR(buf)) {
38821 /* Free the char* */
38822 kfree(buf);
38823diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
38824--- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
38825+++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
38826@@ -328,7 +328,7 @@ check_list:
38827 goto out_unlock_msg_ctx;
38828 i = 5;
38829 if (msg_ctx->msg) {
38830- if (copy_to_user(&buf[i], packet_length, packet_length_size))
38831+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
38832 goto out_unlock_msg_ctx;
38833 i += packet_length_size;
38834 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
38835diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
38836--- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
38837+++ linux-3.0.4/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
38838@@ -55,12 +55,24 @@
38839 #include <linux/pipe_fs_i.h>
38840 #include <linux/oom.h>
38841 #include <linux/compat.h>
38842+#include <linux/random.h>
38843+#include <linux/seq_file.h>
38844+
38845+#ifdef CONFIG_PAX_REFCOUNT
38846+#include <linux/kallsyms.h>
38847+#include <linux/kdebug.h>
38848+#endif
38849
38850 #include <asm/uaccess.h>
38851 #include <asm/mmu_context.h>
38852 #include <asm/tlb.h>
38853 #include "internal.h"
38854
38855+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38856+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38857+EXPORT_SYMBOL(pax_set_initial_flags_func);
38858+#endif
38859+
38860 int core_uses_pid;
38861 char core_pattern[CORENAME_MAX_SIZE] = "core";
38862 unsigned int core_pipe_limit;
38863@@ -70,7 +82,7 @@ struct core_name {
38864 char *corename;
38865 int used, size;
38866 };
38867-static atomic_t call_count = ATOMIC_INIT(1);
38868+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
38869
38870 /* The maximal length of core_pattern is also specified in sysctl.c */
38871
38872@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38873 char *tmp = getname(library);
38874 int error = PTR_ERR(tmp);
38875 static const struct open_flags uselib_flags = {
38876- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38877+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38878 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
38879 .intent = LOOKUP_OPEN
38880 };
38881@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
38882 int write)
38883 {
38884 struct page *page;
38885- int ret;
38886
38887-#ifdef CONFIG_STACK_GROWSUP
38888- if (write) {
38889- ret = expand_downwards(bprm->vma, pos);
38890- if (ret < 0)
38891- return NULL;
38892- }
38893-#endif
38894- ret = get_user_pages(current, bprm->mm, pos,
38895- 1, write, 1, &page, NULL);
38896- if (ret <= 0)
38897+ if (0 > expand_downwards(bprm->vma, pos))
38898+ return NULL;
38899+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38900 return NULL;
38901
38902 if (write) {
38903@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
38904 vma->vm_end = STACK_TOP_MAX;
38905 vma->vm_start = vma->vm_end - PAGE_SIZE;
38906 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
38907+
38908+#ifdef CONFIG_PAX_SEGMEXEC
38909+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38910+#endif
38911+
38912 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38913 INIT_LIST_HEAD(&vma->anon_vma_chain);
38914
38915@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
38916 mm->stack_vm = mm->total_vm = 1;
38917 up_write(&mm->mmap_sem);
38918 bprm->p = vma->vm_end - sizeof(void *);
38919+
38920+#ifdef CONFIG_PAX_RANDUSTACK
38921+ if (randomize_va_space)
38922+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38923+#endif
38924+
38925 return 0;
38926 err:
38927 up_write(&mm->mmap_sem);
38928@@ -403,19 +418,7 @@ err:
38929 return err;
38930 }
38931
38932-struct user_arg_ptr {
38933-#ifdef CONFIG_COMPAT
38934- bool is_compat;
38935-#endif
38936- union {
38937- const char __user *const __user *native;
38938-#ifdef CONFIG_COMPAT
38939- compat_uptr_t __user *compat;
38940-#endif
38941- } ptr;
38942-};
38943-
38944-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38945+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38946 {
38947 const char __user *native;
38948
38949@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
38950 int r;
38951 mm_segment_t oldfs = get_fs();
38952 struct user_arg_ptr argv = {
38953- .ptr.native = (const char __user *const __user *)__argv,
38954+ .ptr.native = (__force const char __user *const __user *)__argv,
38955 };
38956
38957 set_fs(KERNEL_DS);
38958@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
38959 unsigned long new_end = old_end - shift;
38960 struct mmu_gather tlb;
38961
38962- BUG_ON(new_start > new_end);
38963+ if (new_start >= new_end || new_start < mmap_min_addr)
38964+ return -ENOMEM;
38965
38966 /*
38967 * ensure there are no vmas between where we want to go
38968@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
38969 if (vma != find_vma(mm, new_start))
38970 return -EFAULT;
38971
38972+#ifdef CONFIG_PAX_SEGMEXEC
38973+ BUG_ON(pax_find_mirror_vma(vma));
38974+#endif
38975+
38976 /*
38977 * cover the whole range: [new_start, old_end)
38978 */
38979@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
38980 stack_top = arch_align_stack(stack_top);
38981 stack_top = PAGE_ALIGN(stack_top);
38982
38983- if (unlikely(stack_top < mmap_min_addr) ||
38984- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38985- return -ENOMEM;
38986-
38987 stack_shift = vma->vm_end - stack_top;
38988
38989 bprm->p -= stack_shift;
38990@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
38991 bprm->exec -= stack_shift;
38992
38993 down_write(&mm->mmap_sem);
38994+
38995+ /* Move stack pages down in memory. */
38996+ if (stack_shift) {
38997+ ret = shift_arg_pages(vma, stack_shift);
38998+ if (ret)
38999+ goto out_unlock;
39000+ }
39001+
39002 vm_flags = VM_STACK_FLAGS;
39003
39004+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39005+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39006+ vm_flags &= ~VM_EXEC;
39007+
39008+#ifdef CONFIG_PAX_MPROTECT
39009+ if (mm->pax_flags & MF_PAX_MPROTECT)
39010+ vm_flags &= ~VM_MAYEXEC;
39011+#endif
39012+
39013+ }
39014+#endif
39015+
39016 /*
39017 * Adjust stack execute permissions; explicitly enable for
39018 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
39019@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
39020 goto out_unlock;
39021 BUG_ON(prev != vma);
39022
39023- /* Move stack pages down in memory. */
39024- if (stack_shift) {
39025- ret = shift_arg_pages(vma, stack_shift);
39026- if (ret)
39027- goto out_unlock;
39028- }
39029-
39030 /* mprotect_fixup is overkill to remove the temporary stack flags */
39031 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
39032
39033@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
39034 struct file *file;
39035 int err;
39036 static const struct open_flags open_exec_flags = {
39037- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
39038+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
39039 .acc_mode = MAY_EXEC | MAY_OPEN,
39040 .intent = LOOKUP_OPEN
39041 };
39042@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
39043 old_fs = get_fs();
39044 set_fs(get_ds());
39045 /* The cast to a user pointer is valid due to the set_fs() */
39046- result = vfs_read(file, (void __user *)addr, count, &pos);
39047+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
39048 set_fs(old_fs);
39049 return result;
39050 }
39051@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
39052 }
39053 rcu_read_unlock();
39054
39055- if (p->fs->users > n_fs) {
39056+ if (atomic_read(&p->fs->users) > n_fs) {
39057 bprm->unsafe |= LSM_UNSAFE_SHARE;
39058 } else {
39059 res = -EAGAIN;
39060@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
39061 struct user_arg_ptr envp,
39062 struct pt_regs *regs)
39063 {
39064+#ifdef CONFIG_GRKERNSEC
39065+ struct file *old_exec_file;
39066+ struct acl_subject_label *old_acl;
39067+ struct rlimit old_rlim[RLIM_NLIMITS];
39068+#endif
39069 struct linux_binprm *bprm;
39070 struct file *file;
39071 struct files_struct *displaced;
39072 bool clear_in_exec;
39073 int retval;
39074+ const struct cred *cred = current_cred();
39075+
39076+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
39077+
39078+ /*
39079+ * We move the actual failure in case of RLIMIT_NPROC excess from
39080+ * set*uid() to execve() because too many poorly written programs
39081+ * don't check setuid() return code. Here we additionally recheck
39082+ * whether NPROC limit is still exceeded.
39083+ */
39084+ if ((current->flags & PF_NPROC_EXCEEDED) &&
39085+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
39086+ retval = -EAGAIN;
39087+ goto out_ret;
39088+ }
39089+
39090+ /* We're below the limit (still or again), so we don't want to make
39091+ * further execve() calls fail. */
39092+ current->flags &= ~PF_NPROC_EXCEEDED;
39093
39094 retval = unshare_files(&displaced);
39095 if (retval)
39096@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
39097 bprm->filename = filename;
39098 bprm->interp = filename;
39099
39100+ if (gr_process_user_ban()) {
39101+ retval = -EPERM;
39102+ goto out_file;
39103+ }
39104+
39105+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
39106+ retval = -EACCES;
39107+ goto out_file;
39108+ }
39109+
39110 retval = bprm_mm_init(bprm);
39111 if (retval)
39112 goto out_file;
39113@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
39114 if (retval < 0)
39115 goto out;
39116
39117+ if (!gr_tpe_allow(file)) {
39118+ retval = -EACCES;
39119+ goto out;
39120+ }
39121+
39122+ if (gr_check_crash_exec(file)) {
39123+ retval = -EACCES;
39124+ goto out;
39125+ }
39126+
39127+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
39128+
39129+ gr_handle_exec_args(bprm, argv);
39130+
39131+#ifdef CONFIG_GRKERNSEC
39132+ old_acl = current->acl;
39133+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
39134+ old_exec_file = current->exec_file;
39135+ get_file(file);
39136+ current->exec_file = file;
39137+#endif
39138+
39139+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
39140+ bprm->unsafe & LSM_UNSAFE_SHARE);
39141+ if (retval < 0)
39142+ goto out_fail;
39143+
39144 retval = search_binary_handler(bprm,regs);
39145 if (retval < 0)
39146- goto out;
39147+ goto out_fail;
39148+#ifdef CONFIG_GRKERNSEC
39149+ if (old_exec_file)
39150+ fput(old_exec_file);
39151+#endif
39152
39153 /* execve succeeded */
39154 current->fs->in_exec = 0;
39155@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
39156 put_files_struct(displaced);
39157 return retval;
39158
39159+out_fail:
39160+#ifdef CONFIG_GRKERNSEC
39161+ current->acl = old_acl;
39162+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
39163+ fput(current->exec_file);
39164+ current->exec_file = old_exec_file;
39165+#endif
39166+
39167 out:
39168 if (bprm->mm) {
39169 acct_arg_size(bprm, 0);
39170@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
39171 {
39172 char *old_corename = cn->corename;
39173
39174- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
39175+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
39176 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
39177
39178 if (!cn->corename) {
39179@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
39180 int pid_in_pattern = 0;
39181 int err = 0;
39182
39183- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
39184+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
39185 cn->corename = kmalloc(cn->size, GFP_KERNEL);
39186 cn->used = 0;
39187
39188@@ -1758,6 +1848,219 @@ out:
39189 return ispipe;
39190 }
39191
39192+int pax_check_flags(unsigned long *flags)
39193+{
39194+ int retval = 0;
39195+
39196+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
39197+ if (*flags & MF_PAX_SEGMEXEC)
39198+ {
39199+ *flags &= ~MF_PAX_SEGMEXEC;
39200+ retval = -EINVAL;
39201+ }
39202+#endif
39203+
39204+ if ((*flags & MF_PAX_PAGEEXEC)
39205+
39206+#ifdef CONFIG_PAX_PAGEEXEC
39207+ && (*flags & MF_PAX_SEGMEXEC)
39208+#endif
39209+
39210+ )
39211+ {
39212+ *flags &= ~MF_PAX_PAGEEXEC;
39213+ retval = -EINVAL;
39214+ }
39215+
39216+ if ((*flags & MF_PAX_MPROTECT)
39217+
39218+#ifdef CONFIG_PAX_MPROTECT
39219+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
39220+#endif
39221+
39222+ )
39223+ {
39224+ *flags &= ~MF_PAX_MPROTECT;
39225+ retval = -EINVAL;
39226+ }
39227+
39228+ if ((*flags & MF_PAX_EMUTRAMP)
39229+
39230+#ifdef CONFIG_PAX_EMUTRAMP
39231+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
39232+#endif
39233+
39234+ )
39235+ {
39236+ *flags &= ~MF_PAX_EMUTRAMP;
39237+ retval = -EINVAL;
39238+ }
39239+
39240+ return retval;
39241+}
39242+
39243+EXPORT_SYMBOL(pax_check_flags);
39244+
39245+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39246+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
39247+{
39248+ struct task_struct *tsk = current;
39249+ struct mm_struct *mm = current->mm;
39250+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
39251+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
39252+ char *path_exec = NULL;
39253+ char *path_fault = NULL;
39254+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
39255+
39256+ if (buffer_exec && buffer_fault) {
39257+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
39258+
39259+ down_read(&mm->mmap_sem);
39260+ vma = mm->mmap;
39261+ while (vma && (!vma_exec || !vma_fault)) {
39262+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
39263+ vma_exec = vma;
39264+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
39265+ vma_fault = vma;
39266+ vma = vma->vm_next;
39267+ }
39268+ if (vma_exec) {
39269+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
39270+ if (IS_ERR(path_exec))
39271+ path_exec = "<path too long>";
39272+ else {
39273+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
39274+ if (path_exec) {
39275+ *path_exec = 0;
39276+ path_exec = buffer_exec;
39277+ } else
39278+ path_exec = "<path too long>";
39279+ }
39280+ }
39281+ if (vma_fault) {
39282+ start = vma_fault->vm_start;
39283+ end = vma_fault->vm_end;
39284+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
39285+ if (vma_fault->vm_file) {
39286+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
39287+ if (IS_ERR(path_fault))
39288+ path_fault = "<path too long>";
39289+ else {
39290+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
39291+ if (path_fault) {
39292+ *path_fault = 0;
39293+ path_fault = buffer_fault;
39294+ } else
39295+ path_fault = "<path too long>";
39296+ }
39297+ } else
39298+ path_fault = "<anonymous mapping>";
39299+ }
39300+ up_read(&mm->mmap_sem);
39301+ }
39302+ if (tsk->signal->curr_ip)
39303+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
39304+ else
39305+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
39306+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
39307+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
39308+ task_uid(tsk), task_euid(tsk), pc, sp);
39309+ free_page((unsigned long)buffer_exec);
39310+ free_page((unsigned long)buffer_fault);
39311+ pax_report_insns(pc, sp);
39312+ do_coredump(SIGKILL, SIGKILL, regs);
39313+}
39314+#endif
39315+
39316+#ifdef CONFIG_PAX_REFCOUNT
39317+void pax_report_refcount_overflow(struct pt_regs *regs)
39318+{
39319+ if (current->signal->curr_ip)
39320+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
39321+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
39322+ else
39323+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
39324+ current->comm, task_pid_nr(current), current_uid(), current_euid());
39325+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
39326+ show_regs(regs);
39327+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
39328+}
39329+#endif
39330+
39331+#ifdef CONFIG_PAX_USERCOPY
39332+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
39333+int object_is_on_stack(const void *obj, unsigned long len)
39334+{
39335+ const void * const stack = task_stack_page(current);
39336+ const void * const stackend = stack + THREAD_SIZE;
39337+
39338+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
39339+ const void *frame = NULL;
39340+ const void *oldframe;
39341+#endif
39342+
39343+ if (obj + len < obj)
39344+ return -1;
39345+
39346+ if (obj + len <= stack || stackend <= obj)
39347+ return 0;
39348+
39349+ if (obj < stack || stackend < obj + len)
39350+ return -1;
39351+
39352+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
39353+ oldframe = __builtin_frame_address(1);
39354+ if (oldframe)
39355+ frame = __builtin_frame_address(2);
39356+ /*
39357+ low ----------------------------------------------> high
39358+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
39359+ ^----------------^
39360+ allow copies only within here
39361+ */
39362+ while (stack <= frame && frame < stackend) {
39363+ /* if obj + len extends past the last frame, this
39364+ check won't pass and the next frame will be 0,
39365+ causing us to bail out and correctly report
39366+ the copy as invalid
39367+ */
39368+ if (obj + len <= frame)
39369+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
39370+ oldframe = frame;
39371+ frame = *(const void * const *)frame;
39372+ }
39373+ return -1;
39374+#else
39375+ return 1;
39376+#endif
39377+}
39378+
39379+
39380+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
39381+{
39382+ if (current->signal->curr_ip)
39383+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
39384+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
39385+ else
39386+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
39387+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
39388+ dump_stack();
39389+ gr_handle_kernel_exploit();
39390+ do_group_exit(SIGKILL);
39391+}
39392+#endif
39393+
39394+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
39395+void pax_track_stack(void)
39396+{
39397+ unsigned long sp = (unsigned long)&sp;
39398+ if (sp < current_thread_info()->lowest_stack &&
39399+ sp > (unsigned long)task_stack_page(current))
39400+ current_thread_info()->lowest_stack = sp;
39401+}
39402+EXPORT_SYMBOL(pax_track_stack);
39403+#endif
39404+
39405 static int zap_process(struct task_struct *start, int exit_code)
39406 {
39407 struct task_struct *t;
39408@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
39409 pipe = file->f_path.dentry->d_inode->i_pipe;
39410
39411 pipe_lock(pipe);
39412- pipe->readers++;
39413- pipe->writers--;
39414+ atomic_inc(&pipe->readers);
39415+ atomic_dec(&pipe->writers);
39416
39417- while ((pipe->readers > 1) && (!signal_pending(current))) {
39418+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
39419 wake_up_interruptible_sync(&pipe->wait);
39420 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39421 pipe_wait(pipe);
39422 }
39423
39424- pipe->readers--;
39425- pipe->writers++;
39426+ atomic_dec(&pipe->readers);
39427+ atomic_inc(&pipe->writers);
39428 pipe_unlock(pipe);
39429
39430 }
39431@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
39432 int retval = 0;
39433 int flag = 0;
39434 int ispipe;
39435- static atomic_t core_dump_count = ATOMIC_INIT(0);
39436+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
39437 struct coredump_params cprm = {
39438 .signr = signr,
39439 .regs = regs,
39440@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
39441
39442 audit_core_dumps(signr);
39443
39444+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
39445+ gr_handle_brute_attach(current, cprm.mm_flags);
39446+
39447 binfmt = mm->binfmt;
39448 if (!binfmt || !binfmt->core_dump)
39449 goto fail;
39450@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
39451 goto fail_corename;
39452 }
39453
39454+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
39455+
39456 if (ispipe) {
39457 int dump_count;
39458 char **helper_argv;
39459@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
39460 }
39461 cprm.limit = RLIM_INFINITY;
39462
39463- dump_count = atomic_inc_return(&core_dump_count);
39464+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
39465 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
39466 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
39467 task_tgid_vnr(current), current->comm);
39468@@ -2192,7 +2500,7 @@ close_fail:
39469 filp_close(cprm.file, NULL);
39470 fail_dropcount:
39471 if (ispipe)
39472- atomic_dec(&core_dump_count);
39473+ atomic_dec_unchecked(&core_dump_count);
39474 fail_unlock:
39475 kfree(cn.corename);
39476 fail_corename:
39477diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
39478--- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
39479+++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
39480@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
39481
39482 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
39483 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
39484- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
39485+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
39486 sbi->s_resuid != current_fsuid() &&
39487 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
39488 return 0;
39489diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
39490--- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
39491+++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
39492@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
39493
39494 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
39495 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
39496- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
39497+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
39498 sbi->s_resuid != current_fsuid() &&
39499 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
39500 return 0;
39501diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
39502--- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
39503+++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
39504@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
39505 /* Hm, nope. Are (enough) root reserved blocks available? */
39506 if (sbi->s_resuid == current_fsuid() ||
39507 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
39508- capable(CAP_SYS_RESOURCE) ||
39509- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
39510+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
39511+ capable_nolog(CAP_SYS_RESOURCE)) {
39512
39513 if (free_blocks >= (nblocks + dirty_blocks))
39514 return 1;
39515diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
39516--- linux-3.0.4/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
39517+++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
39518@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
39519 unsigned long s_mb_last_start;
39520
39521 /* stats for buddy allocator */
39522- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
39523- atomic_t s_bal_success; /* we found long enough chunks */
39524- atomic_t s_bal_allocated; /* in blocks */
39525- atomic_t s_bal_ex_scanned; /* total extents scanned */
39526- atomic_t s_bal_goals; /* goal hits */
39527- atomic_t s_bal_breaks; /* too long searches */
39528- atomic_t s_bal_2orders; /* 2^order hits */
39529+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
39530+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
39531+ atomic_unchecked_t s_bal_allocated; /* in blocks */
39532+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
39533+ atomic_unchecked_t s_bal_goals; /* goal hits */
39534+ atomic_unchecked_t s_bal_breaks; /* too long searches */
39535+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
39536 spinlock_t s_bal_lock;
39537 unsigned long s_mb_buddies_generated;
39538 unsigned long long s_mb_generation_time;
39539- atomic_t s_mb_lost_chunks;
39540- atomic_t s_mb_preallocated;
39541- atomic_t s_mb_discarded;
39542+ atomic_unchecked_t s_mb_lost_chunks;
39543+ atomic_unchecked_t s_mb_preallocated;
39544+ atomic_unchecked_t s_mb_discarded;
39545 atomic_t s_lock_busy;
39546
39547 /* locality groups */
39548diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
39549--- linux-3.0.4/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
39550+++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
39551@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
39552 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39553
39554 if (EXT4_SB(sb)->s_mb_stats)
39555- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39556+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39557
39558 break;
39559 }
39560@@ -2087,7 +2087,7 @@ repeat:
39561 ac->ac_status = AC_STATUS_CONTINUE;
39562 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39563 cr = 3;
39564- atomic_inc(&sbi->s_mb_lost_chunks);
39565+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39566 goto repeat;
39567 }
39568 }
39569@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
39570 ext4_grpblk_t counters[16];
39571 } sg;
39572
39573+ pax_track_stack();
39574+
39575 group--;
39576 if (group == 0)
39577 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39578@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
39579 if (sbi->s_mb_stats) {
39580 printk(KERN_INFO
39581 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39582- atomic_read(&sbi->s_bal_allocated),
39583- atomic_read(&sbi->s_bal_reqs),
39584- atomic_read(&sbi->s_bal_success));
39585+ atomic_read_unchecked(&sbi->s_bal_allocated),
39586+ atomic_read_unchecked(&sbi->s_bal_reqs),
39587+ atomic_read_unchecked(&sbi->s_bal_success));
39588 printk(KERN_INFO
39589 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39590 "%u 2^N hits, %u breaks, %u lost\n",
39591- atomic_read(&sbi->s_bal_ex_scanned),
39592- atomic_read(&sbi->s_bal_goals),
39593- atomic_read(&sbi->s_bal_2orders),
39594- atomic_read(&sbi->s_bal_breaks),
39595- atomic_read(&sbi->s_mb_lost_chunks));
39596+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39597+ atomic_read_unchecked(&sbi->s_bal_goals),
39598+ atomic_read_unchecked(&sbi->s_bal_2orders),
39599+ atomic_read_unchecked(&sbi->s_bal_breaks),
39600+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39601 printk(KERN_INFO
39602 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39603 sbi->s_mb_buddies_generated++,
39604 sbi->s_mb_generation_time);
39605 printk(KERN_INFO
39606 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39607- atomic_read(&sbi->s_mb_preallocated),
39608- atomic_read(&sbi->s_mb_discarded));
39609+ atomic_read_unchecked(&sbi->s_mb_preallocated),
39610+ atomic_read_unchecked(&sbi->s_mb_discarded));
39611 }
39612
39613 free_percpu(sbi->s_locality_groups);
39614@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
39615 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39616
39617 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39618- atomic_inc(&sbi->s_bal_reqs);
39619- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39620+ atomic_inc_unchecked(&sbi->s_bal_reqs);
39621+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39622 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
39623- atomic_inc(&sbi->s_bal_success);
39624- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39625+ atomic_inc_unchecked(&sbi->s_bal_success);
39626+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39627 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39628 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39629- atomic_inc(&sbi->s_bal_goals);
39630+ atomic_inc_unchecked(&sbi->s_bal_goals);
39631 if (ac->ac_found > sbi->s_mb_max_to_scan)
39632- atomic_inc(&sbi->s_bal_breaks);
39633+ atomic_inc_unchecked(&sbi->s_bal_breaks);
39634 }
39635
39636 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39637@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39638 trace_ext4_mb_new_inode_pa(ac, pa);
39639
39640 ext4_mb_use_inode_pa(ac, pa);
39641- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39642+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39643
39644 ei = EXT4_I(ac->ac_inode);
39645 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39646@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39647 trace_ext4_mb_new_group_pa(ac, pa);
39648
39649 ext4_mb_use_group_pa(ac, pa);
39650- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39651+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39652
39653 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39654 lg = ac->ac_lg;
39655@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39656 * from the bitmap and continue.
39657 */
39658 }
39659- atomic_add(free, &sbi->s_mb_discarded);
39660+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
39661
39662 return err;
39663 }
39664@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39665 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39666 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39667 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39668- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39669+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39670 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
39671
39672 return 0;
39673diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
39674--- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
39675+++ linux-3.0.4/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
39676@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
39677 if (err)
39678 return err;
39679
39680+ if (gr_handle_chroot_fowner(pid, type))
39681+ return -ENOENT;
39682+ if (gr_check_protected_task_fowner(pid, type))
39683+ return -EACCES;
39684+
39685 f_modown(filp, pid, type, force);
39686 return 0;
39687 }
39688@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
39689 switch (cmd) {
39690 case F_DUPFD:
39691 case F_DUPFD_CLOEXEC:
39692+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39693 if (arg >= rlimit(RLIMIT_NOFILE))
39694 break;
39695 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39696@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
39697 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
39698 * is defined as O_NONBLOCK on some platforms and not on others.
39699 */
39700- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39701+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39702 O_RDONLY | O_WRONLY | O_RDWR |
39703 O_CREAT | O_EXCL | O_NOCTTY |
39704 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
39705 __O_SYNC | O_DSYNC | FASYNC |
39706 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
39707 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
39708- __FMODE_EXEC | O_PATH
39709+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
39710 ));
39711
39712 fasync_cache = kmem_cache_create("fasync_cache",
39713diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
39714--- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
39715+++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
39716@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
39717 */
39718 filp->f_op = &read_pipefifo_fops;
39719 pipe->r_counter++;
39720- if (pipe->readers++ == 0)
39721+ if (atomic_inc_return(&pipe->readers) == 1)
39722 wake_up_partner(inode);
39723
39724- if (!pipe->writers) {
39725+ if (!atomic_read(&pipe->writers)) {
39726 if ((filp->f_flags & O_NONBLOCK)) {
39727 /* suppress POLLHUP until we have
39728 * seen a writer */
39729@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
39730 * errno=ENXIO when there is no process reading the FIFO.
39731 */
39732 ret = -ENXIO;
39733- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39734+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39735 goto err;
39736
39737 filp->f_op = &write_pipefifo_fops;
39738 pipe->w_counter++;
39739- if (!pipe->writers++)
39740+ if (atomic_inc_return(&pipe->writers) == 1)
39741 wake_up_partner(inode);
39742
39743- if (!pipe->readers) {
39744+ if (!atomic_read(&pipe->readers)) {
39745 wait_for_partner(inode, &pipe->r_counter);
39746 if (signal_pending(current))
39747 goto err_wr;
39748@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
39749 */
39750 filp->f_op = &rdwr_pipefifo_fops;
39751
39752- pipe->readers++;
39753- pipe->writers++;
39754+ atomic_inc(&pipe->readers);
39755+ atomic_inc(&pipe->writers);
39756 pipe->r_counter++;
39757 pipe->w_counter++;
39758- if (pipe->readers == 1 || pipe->writers == 1)
39759+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39760 wake_up_partner(inode);
39761 break;
39762
39763@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
39764 return 0;
39765
39766 err_rd:
39767- if (!--pipe->readers)
39768+ if (atomic_dec_and_test(&pipe->readers))
39769 wake_up_interruptible(&pipe->wait);
39770 ret = -ERESTARTSYS;
39771 goto err;
39772
39773 err_wr:
39774- if (!--pipe->writers)
39775+ if (atomic_dec_and_test(&pipe->writers))
39776 wake_up_interruptible(&pipe->wait);
39777 ret = -ERESTARTSYS;
39778 goto err;
39779
39780 err:
39781- if (!pipe->readers && !pipe->writers)
39782+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39783 free_pipe_info(inode);
39784
39785 err_nocleanup:
39786diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
39787--- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
39788+++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
39789@@ -15,6 +15,7 @@
39790 #include <linux/slab.h>
39791 #include <linux/vmalloc.h>
39792 #include <linux/file.h>
39793+#include <linux/security.h>
39794 #include <linux/fdtable.h>
39795 #include <linux/bitops.h>
39796 #include <linux/interrupt.h>
39797@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
39798 * N.B. For clone tasks sharing a files structure, this test
39799 * will limit the total number of files that can be opened.
39800 */
39801+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39802 if (nr >= rlimit(RLIMIT_NOFILE))
39803 return -EMFILE;
39804
39805diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
39806--- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
39807+++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
39808@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
39809 int len = dot ? dot - name : strlen(name);
39810
39811 fs = __get_fs_type(name, len);
39812+
39813+#ifdef CONFIG_GRKERNSEC_MODHARDEN
39814+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39815+#else
39816 if (!fs && (request_module("%.*s", len, name) == 0))
39817+#endif
39818 fs = __get_fs_type(name, len);
39819
39820 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39821diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
39822--- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
39823+++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
39824@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39825 parent ? (char *) parent->def->name : "<no-parent>",
39826 def->name, netfs_data);
39827
39828- fscache_stat(&fscache_n_acquires);
39829+ fscache_stat_unchecked(&fscache_n_acquires);
39830
39831 /* if there's no parent cookie, then we don't create one here either */
39832 if (!parent) {
39833- fscache_stat(&fscache_n_acquires_null);
39834+ fscache_stat_unchecked(&fscache_n_acquires_null);
39835 _leave(" [no parent]");
39836 return NULL;
39837 }
39838@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39839 /* allocate and initialise a cookie */
39840 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39841 if (!cookie) {
39842- fscache_stat(&fscache_n_acquires_oom);
39843+ fscache_stat_unchecked(&fscache_n_acquires_oom);
39844 _leave(" [ENOMEM]");
39845 return NULL;
39846 }
39847@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39848
39849 switch (cookie->def->type) {
39850 case FSCACHE_COOKIE_TYPE_INDEX:
39851- fscache_stat(&fscache_n_cookie_index);
39852+ fscache_stat_unchecked(&fscache_n_cookie_index);
39853 break;
39854 case FSCACHE_COOKIE_TYPE_DATAFILE:
39855- fscache_stat(&fscache_n_cookie_data);
39856+ fscache_stat_unchecked(&fscache_n_cookie_data);
39857 break;
39858 default:
39859- fscache_stat(&fscache_n_cookie_special);
39860+ fscache_stat_unchecked(&fscache_n_cookie_special);
39861 break;
39862 }
39863
39864@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39865 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39866 atomic_dec(&parent->n_children);
39867 __fscache_cookie_put(cookie);
39868- fscache_stat(&fscache_n_acquires_nobufs);
39869+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39870 _leave(" = NULL");
39871 return NULL;
39872 }
39873 }
39874
39875- fscache_stat(&fscache_n_acquires_ok);
39876+ fscache_stat_unchecked(&fscache_n_acquires_ok);
39877 _leave(" = %p", cookie);
39878 return cookie;
39879 }
39880@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39881 cache = fscache_select_cache_for_object(cookie->parent);
39882 if (!cache) {
39883 up_read(&fscache_addremove_sem);
39884- fscache_stat(&fscache_n_acquires_no_cache);
39885+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39886 _leave(" = -ENOMEDIUM [no cache]");
39887 return -ENOMEDIUM;
39888 }
39889@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39890 object = cache->ops->alloc_object(cache, cookie);
39891 fscache_stat_d(&fscache_n_cop_alloc_object);
39892 if (IS_ERR(object)) {
39893- fscache_stat(&fscache_n_object_no_alloc);
39894+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
39895 ret = PTR_ERR(object);
39896 goto error;
39897 }
39898
39899- fscache_stat(&fscache_n_object_alloc);
39900+ fscache_stat_unchecked(&fscache_n_object_alloc);
39901
39902 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39903
39904@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39905 struct fscache_object *object;
39906 struct hlist_node *_p;
39907
39908- fscache_stat(&fscache_n_updates);
39909+ fscache_stat_unchecked(&fscache_n_updates);
39910
39911 if (!cookie) {
39912- fscache_stat(&fscache_n_updates_null);
39913+ fscache_stat_unchecked(&fscache_n_updates_null);
39914 _leave(" [no cookie]");
39915 return;
39916 }
39917@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39918 struct fscache_object *object;
39919 unsigned long event;
39920
39921- fscache_stat(&fscache_n_relinquishes);
39922+ fscache_stat_unchecked(&fscache_n_relinquishes);
39923 if (retire)
39924- fscache_stat(&fscache_n_relinquishes_retire);
39925+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39926
39927 if (!cookie) {
39928- fscache_stat(&fscache_n_relinquishes_null);
39929+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
39930 _leave(" [no cookie]");
39931 return;
39932 }
39933@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39934
39935 /* wait for the cookie to finish being instantiated (or to fail) */
39936 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39937- fscache_stat(&fscache_n_relinquishes_waitcrt);
39938+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39939 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39940 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39941 }
39942diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
39943--- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
39944+++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
39945@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
39946 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39947 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39948
39949-extern atomic_t fscache_n_op_pend;
39950-extern atomic_t fscache_n_op_run;
39951-extern atomic_t fscache_n_op_enqueue;
39952-extern atomic_t fscache_n_op_deferred_release;
39953-extern atomic_t fscache_n_op_release;
39954-extern atomic_t fscache_n_op_gc;
39955-extern atomic_t fscache_n_op_cancelled;
39956-extern atomic_t fscache_n_op_rejected;
39957-
39958-extern atomic_t fscache_n_attr_changed;
39959-extern atomic_t fscache_n_attr_changed_ok;
39960-extern atomic_t fscache_n_attr_changed_nobufs;
39961-extern atomic_t fscache_n_attr_changed_nomem;
39962-extern atomic_t fscache_n_attr_changed_calls;
39963-
39964-extern atomic_t fscache_n_allocs;
39965-extern atomic_t fscache_n_allocs_ok;
39966-extern atomic_t fscache_n_allocs_wait;
39967-extern atomic_t fscache_n_allocs_nobufs;
39968-extern atomic_t fscache_n_allocs_intr;
39969-extern atomic_t fscache_n_allocs_object_dead;
39970-extern atomic_t fscache_n_alloc_ops;
39971-extern atomic_t fscache_n_alloc_op_waits;
39972-
39973-extern atomic_t fscache_n_retrievals;
39974-extern atomic_t fscache_n_retrievals_ok;
39975-extern atomic_t fscache_n_retrievals_wait;
39976-extern atomic_t fscache_n_retrievals_nodata;
39977-extern atomic_t fscache_n_retrievals_nobufs;
39978-extern atomic_t fscache_n_retrievals_intr;
39979-extern atomic_t fscache_n_retrievals_nomem;
39980-extern atomic_t fscache_n_retrievals_object_dead;
39981-extern atomic_t fscache_n_retrieval_ops;
39982-extern atomic_t fscache_n_retrieval_op_waits;
39983-
39984-extern atomic_t fscache_n_stores;
39985-extern atomic_t fscache_n_stores_ok;
39986-extern atomic_t fscache_n_stores_again;
39987-extern atomic_t fscache_n_stores_nobufs;
39988-extern atomic_t fscache_n_stores_oom;
39989-extern atomic_t fscache_n_store_ops;
39990-extern atomic_t fscache_n_store_calls;
39991-extern atomic_t fscache_n_store_pages;
39992-extern atomic_t fscache_n_store_radix_deletes;
39993-extern atomic_t fscache_n_store_pages_over_limit;
39994-
39995-extern atomic_t fscache_n_store_vmscan_not_storing;
39996-extern atomic_t fscache_n_store_vmscan_gone;
39997-extern atomic_t fscache_n_store_vmscan_busy;
39998-extern atomic_t fscache_n_store_vmscan_cancelled;
39999-
40000-extern atomic_t fscache_n_marks;
40001-extern atomic_t fscache_n_uncaches;
40002-
40003-extern atomic_t fscache_n_acquires;
40004-extern atomic_t fscache_n_acquires_null;
40005-extern atomic_t fscache_n_acquires_no_cache;
40006-extern atomic_t fscache_n_acquires_ok;
40007-extern atomic_t fscache_n_acquires_nobufs;
40008-extern atomic_t fscache_n_acquires_oom;
40009-
40010-extern atomic_t fscache_n_updates;
40011-extern atomic_t fscache_n_updates_null;
40012-extern atomic_t fscache_n_updates_run;
40013-
40014-extern atomic_t fscache_n_relinquishes;
40015-extern atomic_t fscache_n_relinquishes_null;
40016-extern atomic_t fscache_n_relinquishes_waitcrt;
40017-extern atomic_t fscache_n_relinquishes_retire;
40018-
40019-extern atomic_t fscache_n_cookie_index;
40020-extern atomic_t fscache_n_cookie_data;
40021-extern atomic_t fscache_n_cookie_special;
40022-
40023-extern atomic_t fscache_n_object_alloc;
40024-extern atomic_t fscache_n_object_no_alloc;
40025-extern atomic_t fscache_n_object_lookups;
40026-extern atomic_t fscache_n_object_lookups_negative;
40027-extern atomic_t fscache_n_object_lookups_positive;
40028-extern atomic_t fscache_n_object_lookups_timed_out;
40029-extern atomic_t fscache_n_object_created;
40030-extern atomic_t fscache_n_object_avail;
40031-extern atomic_t fscache_n_object_dead;
40032-
40033-extern atomic_t fscache_n_checkaux_none;
40034-extern atomic_t fscache_n_checkaux_okay;
40035-extern atomic_t fscache_n_checkaux_update;
40036-extern atomic_t fscache_n_checkaux_obsolete;
40037+extern atomic_unchecked_t fscache_n_op_pend;
40038+extern atomic_unchecked_t fscache_n_op_run;
40039+extern atomic_unchecked_t fscache_n_op_enqueue;
40040+extern atomic_unchecked_t fscache_n_op_deferred_release;
40041+extern atomic_unchecked_t fscache_n_op_release;
40042+extern atomic_unchecked_t fscache_n_op_gc;
40043+extern atomic_unchecked_t fscache_n_op_cancelled;
40044+extern atomic_unchecked_t fscache_n_op_rejected;
40045+
40046+extern atomic_unchecked_t fscache_n_attr_changed;
40047+extern atomic_unchecked_t fscache_n_attr_changed_ok;
40048+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
40049+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
40050+extern atomic_unchecked_t fscache_n_attr_changed_calls;
40051+
40052+extern atomic_unchecked_t fscache_n_allocs;
40053+extern atomic_unchecked_t fscache_n_allocs_ok;
40054+extern atomic_unchecked_t fscache_n_allocs_wait;
40055+extern atomic_unchecked_t fscache_n_allocs_nobufs;
40056+extern atomic_unchecked_t fscache_n_allocs_intr;
40057+extern atomic_unchecked_t fscache_n_allocs_object_dead;
40058+extern atomic_unchecked_t fscache_n_alloc_ops;
40059+extern atomic_unchecked_t fscache_n_alloc_op_waits;
40060+
40061+extern atomic_unchecked_t fscache_n_retrievals;
40062+extern atomic_unchecked_t fscache_n_retrievals_ok;
40063+extern atomic_unchecked_t fscache_n_retrievals_wait;
40064+extern atomic_unchecked_t fscache_n_retrievals_nodata;
40065+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
40066+extern atomic_unchecked_t fscache_n_retrievals_intr;
40067+extern atomic_unchecked_t fscache_n_retrievals_nomem;
40068+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
40069+extern atomic_unchecked_t fscache_n_retrieval_ops;
40070+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
40071+
40072+extern atomic_unchecked_t fscache_n_stores;
40073+extern atomic_unchecked_t fscache_n_stores_ok;
40074+extern atomic_unchecked_t fscache_n_stores_again;
40075+extern atomic_unchecked_t fscache_n_stores_nobufs;
40076+extern atomic_unchecked_t fscache_n_stores_oom;
40077+extern atomic_unchecked_t fscache_n_store_ops;
40078+extern atomic_unchecked_t fscache_n_store_calls;
40079+extern atomic_unchecked_t fscache_n_store_pages;
40080+extern atomic_unchecked_t fscache_n_store_radix_deletes;
40081+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
40082+
40083+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40084+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
40085+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
40086+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40087+
40088+extern atomic_unchecked_t fscache_n_marks;
40089+extern atomic_unchecked_t fscache_n_uncaches;
40090+
40091+extern atomic_unchecked_t fscache_n_acquires;
40092+extern atomic_unchecked_t fscache_n_acquires_null;
40093+extern atomic_unchecked_t fscache_n_acquires_no_cache;
40094+extern atomic_unchecked_t fscache_n_acquires_ok;
40095+extern atomic_unchecked_t fscache_n_acquires_nobufs;
40096+extern atomic_unchecked_t fscache_n_acquires_oom;
40097+
40098+extern atomic_unchecked_t fscache_n_updates;
40099+extern atomic_unchecked_t fscache_n_updates_null;
40100+extern atomic_unchecked_t fscache_n_updates_run;
40101+
40102+extern atomic_unchecked_t fscache_n_relinquishes;
40103+extern atomic_unchecked_t fscache_n_relinquishes_null;
40104+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40105+extern atomic_unchecked_t fscache_n_relinquishes_retire;
40106+
40107+extern atomic_unchecked_t fscache_n_cookie_index;
40108+extern atomic_unchecked_t fscache_n_cookie_data;
40109+extern atomic_unchecked_t fscache_n_cookie_special;
40110+
40111+extern atomic_unchecked_t fscache_n_object_alloc;
40112+extern atomic_unchecked_t fscache_n_object_no_alloc;
40113+extern atomic_unchecked_t fscache_n_object_lookups;
40114+extern atomic_unchecked_t fscache_n_object_lookups_negative;
40115+extern atomic_unchecked_t fscache_n_object_lookups_positive;
40116+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
40117+extern atomic_unchecked_t fscache_n_object_created;
40118+extern atomic_unchecked_t fscache_n_object_avail;
40119+extern atomic_unchecked_t fscache_n_object_dead;
40120+
40121+extern atomic_unchecked_t fscache_n_checkaux_none;
40122+extern atomic_unchecked_t fscache_n_checkaux_okay;
40123+extern atomic_unchecked_t fscache_n_checkaux_update;
40124+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
40125
40126 extern atomic_t fscache_n_cop_alloc_object;
40127 extern atomic_t fscache_n_cop_lookup_object;
40128@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
40129 atomic_inc(stat);
40130 }
40131
40132+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
40133+{
40134+ atomic_inc_unchecked(stat);
40135+}
40136+
40137 static inline void fscache_stat_d(atomic_t *stat)
40138 {
40139 atomic_dec(stat);
40140@@ -267,6 +272,7 @@ extern const struct file_operations fsca
40141
40142 #define __fscache_stat(stat) (NULL)
40143 #define fscache_stat(stat) do {} while (0)
40144+#define fscache_stat_unchecked(stat) do {} while (0)
40145 #define fscache_stat_d(stat) do {} while (0)
40146 #endif
40147
40148diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
40149--- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
40150+++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
40151@@ -128,7 +128,7 @@ static void fscache_object_state_machine
40152 /* update the object metadata on disk */
40153 case FSCACHE_OBJECT_UPDATING:
40154 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
40155- fscache_stat(&fscache_n_updates_run);
40156+ fscache_stat_unchecked(&fscache_n_updates_run);
40157 fscache_stat(&fscache_n_cop_update_object);
40158 object->cache->ops->update_object(object);
40159 fscache_stat_d(&fscache_n_cop_update_object);
40160@@ -217,7 +217,7 @@ static void fscache_object_state_machine
40161 spin_lock(&object->lock);
40162 object->state = FSCACHE_OBJECT_DEAD;
40163 spin_unlock(&object->lock);
40164- fscache_stat(&fscache_n_object_dead);
40165+ fscache_stat_unchecked(&fscache_n_object_dead);
40166 goto terminal_transit;
40167
40168 /* handle the parent cache of this object being withdrawn from
40169@@ -232,7 +232,7 @@ static void fscache_object_state_machine
40170 spin_lock(&object->lock);
40171 object->state = FSCACHE_OBJECT_DEAD;
40172 spin_unlock(&object->lock);
40173- fscache_stat(&fscache_n_object_dead);
40174+ fscache_stat_unchecked(&fscache_n_object_dead);
40175 goto terminal_transit;
40176
40177 /* complain about the object being woken up once it is
40178@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
40179 parent->cookie->def->name, cookie->def->name,
40180 object->cache->tag->name);
40181
40182- fscache_stat(&fscache_n_object_lookups);
40183+ fscache_stat_unchecked(&fscache_n_object_lookups);
40184 fscache_stat(&fscache_n_cop_lookup_object);
40185 ret = object->cache->ops->lookup_object(object);
40186 fscache_stat_d(&fscache_n_cop_lookup_object);
40187@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
40188 if (ret == -ETIMEDOUT) {
40189 /* probably stuck behind another object, so move this one to
40190 * the back of the queue */
40191- fscache_stat(&fscache_n_object_lookups_timed_out);
40192+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
40193 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
40194 }
40195
40196@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
40197
40198 spin_lock(&object->lock);
40199 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
40200- fscache_stat(&fscache_n_object_lookups_negative);
40201+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
40202
40203 /* transit here to allow write requests to begin stacking up
40204 * and read requests to begin returning ENODATA */
40205@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
40206 * result, in which case there may be data available */
40207 spin_lock(&object->lock);
40208 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
40209- fscache_stat(&fscache_n_object_lookups_positive);
40210+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
40211
40212 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
40213
40214@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
40215 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
40216 } else {
40217 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
40218- fscache_stat(&fscache_n_object_created);
40219+ fscache_stat_unchecked(&fscache_n_object_created);
40220
40221 object->state = FSCACHE_OBJECT_AVAILABLE;
40222 spin_unlock(&object->lock);
40223@@ -602,7 +602,7 @@ static void fscache_object_available(str
40224 fscache_enqueue_dependents(object);
40225
40226 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
40227- fscache_stat(&fscache_n_object_avail);
40228+ fscache_stat_unchecked(&fscache_n_object_avail);
40229
40230 _leave("");
40231 }
40232@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
40233 enum fscache_checkaux result;
40234
40235 if (!object->cookie->def->check_aux) {
40236- fscache_stat(&fscache_n_checkaux_none);
40237+ fscache_stat_unchecked(&fscache_n_checkaux_none);
40238 return FSCACHE_CHECKAUX_OKAY;
40239 }
40240
40241@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
40242 switch (result) {
40243 /* entry okay as is */
40244 case FSCACHE_CHECKAUX_OKAY:
40245- fscache_stat(&fscache_n_checkaux_okay);
40246+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
40247 break;
40248
40249 /* entry requires update */
40250 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
40251- fscache_stat(&fscache_n_checkaux_update);
40252+ fscache_stat_unchecked(&fscache_n_checkaux_update);
40253 break;
40254
40255 /* entry requires deletion */
40256 case FSCACHE_CHECKAUX_OBSOLETE:
40257- fscache_stat(&fscache_n_checkaux_obsolete);
40258+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
40259 break;
40260
40261 default:
40262diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
40263--- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
40264+++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
40265@@ -17,7 +17,7 @@
40266 #include <linux/slab.h>
40267 #include "internal.h"
40268
40269-atomic_t fscache_op_debug_id;
40270+atomic_unchecked_t fscache_op_debug_id;
40271 EXPORT_SYMBOL(fscache_op_debug_id);
40272
40273 /**
40274@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
40275 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
40276 ASSERTCMP(atomic_read(&op->usage), >, 0);
40277
40278- fscache_stat(&fscache_n_op_enqueue);
40279+ fscache_stat_unchecked(&fscache_n_op_enqueue);
40280 switch (op->flags & FSCACHE_OP_TYPE) {
40281 case FSCACHE_OP_ASYNC:
40282 _debug("queue async");
40283@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
40284 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
40285 if (op->processor)
40286 fscache_enqueue_operation(op);
40287- fscache_stat(&fscache_n_op_run);
40288+ fscache_stat_unchecked(&fscache_n_op_run);
40289 }
40290
40291 /*
40292@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
40293 if (object->n_ops > 1) {
40294 atomic_inc(&op->usage);
40295 list_add_tail(&op->pend_link, &object->pending_ops);
40296- fscache_stat(&fscache_n_op_pend);
40297+ fscache_stat_unchecked(&fscache_n_op_pend);
40298 } else if (!list_empty(&object->pending_ops)) {
40299 atomic_inc(&op->usage);
40300 list_add_tail(&op->pend_link, &object->pending_ops);
40301- fscache_stat(&fscache_n_op_pend);
40302+ fscache_stat_unchecked(&fscache_n_op_pend);
40303 fscache_start_operations(object);
40304 } else {
40305 ASSERTCMP(object->n_in_progress, ==, 0);
40306@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
40307 object->n_exclusive++; /* reads and writes must wait */
40308 atomic_inc(&op->usage);
40309 list_add_tail(&op->pend_link, &object->pending_ops);
40310- fscache_stat(&fscache_n_op_pend);
40311+ fscache_stat_unchecked(&fscache_n_op_pend);
40312 ret = 0;
40313 } else {
40314 /* not allowed to submit ops in any other state */
40315@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
40316 if (object->n_exclusive > 0) {
40317 atomic_inc(&op->usage);
40318 list_add_tail(&op->pend_link, &object->pending_ops);
40319- fscache_stat(&fscache_n_op_pend);
40320+ fscache_stat_unchecked(&fscache_n_op_pend);
40321 } else if (!list_empty(&object->pending_ops)) {
40322 atomic_inc(&op->usage);
40323 list_add_tail(&op->pend_link, &object->pending_ops);
40324- fscache_stat(&fscache_n_op_pend);
40325+ fscache_stat_unchecked(&fscache_n_op_pend);
40326 fscache_start_operations(object);
40327 } else {
40328 ASSERTCMP(object->n_exclusive, ==, 0);
40329@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
40330 object->n_ops++;
40331 atomic_inc(&op->usage);
40332 list_add_tail(&op->pend_link, &object->pending_ops);
40333- fscache_stat(&fscache_n_op_pend);
40334+ fscache_stat_unchecked(&fscache_n_op_pend);
40335 ret = 0;
40336 } else if (object->state == FSCACHE_OBJECT_DYING ||
40337 object->state == FSCACHE_OBJECT_LC_DYING ||
40338 object->state == FSCACHE_OBJECT_WITHDRAWING) {
40339- fscache_stat(&fscache_n_op_rejected);
40340+ fscache_stat_unchecked(&fscache_n_op_rejected);
40341 ret = -ENOBUFS;
40342 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
40343 fscache_report_unexpected_submission(object, op, ostate);
40344@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
40345
40346 ret = -EBUSY;
40347 if (!list_empty(&op->pend_link)) {
40348- fscache_stat(&fscache_n_op_cancelled);
40349+ fscache_stat_unchecked(&fscache_n_op_cancelled);
40350 list_del_init(&op->pend_link);
40351 object->n_ops--;
40352 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
40353@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
40354 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
40355 BUG();
40356
40357- fscache_stat(&fscache_n_op_release);
40358+ fscache_stat_unchecked(&fscache_n_op_release);
40359
40360 if (op->release) {
40361 op->release(op);
40362@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
40363 * lock, and defer it otherwise */
40364 if (!spin_trylock(&object->lock)) {
40365 _debug("defer put");
40366- fscache_stat(&fscache_n_op_deferred_release);
40367+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
40368
40369 cache = object->cache;
40370 spin_lock(&cache->op_gc_list_lock);
40371@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
40372
40373 _debug("GC DEFERRED REL OBJ%x OP%x",
40374 object->debug_id, op->debug_id);
40375- fscache_stat(&fscache_n_op_gc);
40376+ fscache_stat_unchecked(&fscache_n_op_gc);
40377
40378 ASSERTCMP(atomic_read(&op->usage), ==, 0);
40379
40380diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
40381--- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
40382+++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
40383@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
40384 val = radix_tree_lookup(&cookie->stores, page->index);
40385 if (!val) {
40386 rcu_read_unlock();
40387- fscache_stat(&fscache_n_store_vmscan_not_storing);
40388+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
40389 __fscache_uncache_page(cookie, page);
40390 return true;
40391 }
40392@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
40393 spin_unlock(&cookie->stores_lock);
40394
40395 if (xpage) {
40396- fscache_stat(&fscache_n_store_vmscan_cancelled);
40397- fscache_stat(&fscache_n_store_radix_deletes);
40398+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
40399+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
40400 ASSERTCMP(xpage, ==, page);
40401 } else {
40402- fscache_stat(&fscache_n_store_vmscan_gone);
40403+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
40404 }
40405
40406 wake_up_bit(&cookie->flags, 0);
40407@@ -107,7 +107,7 @@ page_busy:
40408 /* we might want to wait here, but that could deadlock the allocator as
40409 * the work threads writing to the cache may all end up sleeping
40410 * on memory allocation */
40411- fscache_stat(&fscache_n_store_vmscan_busy);
40412+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
40413 return false;
40414 }
40415 EXPORT_SYMBOL(__fscache_maybe_release_page);
40416@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
40417 FSCACHE_COOKIE_STORING_TAG);
40418 if (!radix_tree_tag_get(&cookie->stores, page->index,
40419 FSCACHE_COOKIE_PENDING_TAG)) {
40420- fscache_stat(&fscache_n_store_radix_deletes);
40421+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
40422 xpage = radix_tree_delete(&cookie->stores, page->index);
40423 }
40424 spin_unlock(&cookie->stores_lock);
40425@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
40426
40427 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
40428
40429- fscache_stat(&fscache_n_attr_changed_calls);
40430+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
40431
40432 if (fscache_object_is_active(object)) {
40433 fscache_stat(&fscache_n_cop_attr_changed);
40434@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
40435
40436 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40437
40438- fscache_stat(&fscache_n_attr_changed);
40439+ fscache_stat_unchecked(&fscache_n_attr_changed);
40440
40441 op = kzalloc(sizeof(*op), GFP_KERNEL);
40442 if (!op) {
40443- fscache_stat(&fscache_n_attr_changed_nomem);
40444+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
40445 _leave(" = -ENOMEM");
40446 return -ENOMEM;
40447 }
40448@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
40449 if (fscache_submit_exclusive_op(object, op) < 0)
40450 goto nobufs;
40451 spin_unlock(&cookie->lock);
40452- fscache_stat(&fscache_n_attr_changed_ok);
40453+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
40454 fscache_put_operation(op);
40455 _leave(" = 0");
40456 return 0;
40457@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
40458 nobufs:
40459 spin_unlock(&cookie->lock);
40460 kfree(op);
40461- fscache_stat(&fscache_n_attr_changed_nobufs);
40462+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
40463 _leave(" = %d", -ENOBUFS);
40464 return -ENOBUFS;
40465 }
40466@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
40467 /* allocate a retrieval operation and attempt to submit it */
40468 op = kzalloc(sizeof(*op), GFP_NOIO);
40469 if (!op) {
40470- fscache_stat(&fscache_n_retrievals_nomem);
40471+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40472 return NULL;
40473 }
40474
40475@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
40476 return 0;
40477 }
40478
40479- fscache_stat(&fscache_n_retrievals_wait);
40480+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
40481
40482 jif = jiffies;
40483 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
40484 fscache_wait_bit_interruptible,
40485 TASK_INTERRUPTIBLE) != 0) {
40486- fscache_stat(&fscache_n_retrievals_intr);
40487+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40488 _leave(" = -ERESTARTSYS");
40489 return -ERESTARTSYS;
40490 }
40491@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
40492 */
40493 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
40494 struct fscache_retrieval *op,
40495- atomic_t *stat_op_waits,
40496- atomic_t *stat_object_dead)
40497+ atomic_unchecked_t *stat_op_waits,
40498+ atomic_unchecked_t *stat_object_dead)
40499 {
40500 int ret;
40501
40502@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
40503 goto check_if_dead;
40504
40505 _debug(">>> WT");
40506- fscache_stat(stat_op_waits);
40507+ fscache_stat_unchecked(stat_op_waits);
40508 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
40509 fscache_wait_bit_interruptible,
40510 TASK_INTERRUPTIBLE) < 0) {
40511@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
40512
40513 check_if_dead:
40514 if (unlikely(fscache_object_is_dead(object))) {
40515- fscache_stat(stat_object_dead);
40516+ fscache_stat_unchecked(stat_object_dead);
40517 return -ENOBUFS;
40518 }
40519 return 0;
40520@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
40521
40522 _enter("%p,%p,,,", cookie, page);
40523
40524- fscache_stat(&fscache_n_retrievals);
40525+ fscache_stat_unchecked(&fscache_n_retrievals);
40526
40527 if (hlist_empty(&cookie->backing_objects))
40528 goto nobufs;
40529@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
40530 goto nobufs_unlock;
40531 spin_unlock(&cookie->lock);
40532
40533- fscache_stat(&fscache_n_retrieval_ops);
40534+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40535
40536 /* pin the netfs read context in case we need to do the actual netfs
40537 * read because we've encountered a cache read failure */
40538@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
40539
40540 error:
40541 if (ret == -ENOMEM)
40542- fscache_stat(&fscache_n_retrievals_nomem);
40543+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40544 else if (ret == -ERESTARTSYS)
40545- fscache_stat(&fscache_n_retrievals_intr);
40546+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40547 else if (ret == -ENODATA)
40548- fscache_stat(&fscache_n_retrievals_nodata);
40549+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40550 else if (ret < 0)
40551- fscache_stat(&fscache_n_retrievals_nobufs);
40552+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40553 else
40554- fscache_stat(&fscache_n_retrievals_ok);
40555+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40556
40557 fscache_put_retrieval(op);
40558 _leave(" = %d", ret);
40559@@ -429,7 +429,7 @@ nobufs_unlock:
40560 spin_unlock(&cookie->lock);
40561 kfree(op);
40562 nobufs:
40563- fscache_stat(&fscache_n_retrievals_nobufs);
40564+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40565 _leave(" = -ENOBUFS");
40566 return -ENOBUFS;
40567 }
40568@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
40569
40570 _enter("%p,,%d,,,", cookie, *nr_pages);
40571
40572- fscache_stat(&fscache_n_retrievals);
40573+ fscache_stat_unchecked(&fscache_n_retrievals);
40574
40575 if (hlist_empty(&cookie->backing_objects))
40576 goto nobufs;
40577@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
40578 goto nobufs_unlock;
40579 spin_unlock(&cookie->lock);
40580
40581- fscache_stat(&fscache_n_retrieval_ops);
40582+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40583
40584 /* pin the netfs read context in case we need to do the actual netfs
40585 * read because we've encountered a cache read failure */
40586@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
40587
40588 error:
40589 if (ret == -ENOMEM)
40590- fscache_stat(&fscache_n_retrievals_nomem);
40591+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40592 else if (ret == -ERESTARTSYS)
40593- fscache_stat(&fscache_n_retrievals_intr);
40594+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40595 else if (ret == -ENODATA)
40596- fscache_stat(&fscache_n_retrievals_nodata);
40597+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40598 else if (ret < 0)
40599- fscache_stat(&fscache_n_retrievals_nobufs);
40600+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40601 else
40602- fscache_stat(&fscache_n_retrievals_ok);
40603+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40604
40605 fscache_put_retrieval(op);
40606 _leave(" = %d", ret);
40607@@ -545,7 +545,7 @@ nobufs_unlock:
40608 spin_unlock(&cookie->lock);
40609 kfree(op);
40610 nobufs:
40611- fscache_stat(&fscache_n_retrievals_nobufs);
40612+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40613 _leave(" = -ENOBUFS");
40614 return -ENOBUFS;
40615 }
40616@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
40617
40618 _enter("%p,%p,,,", cookie, page);
40619
40620- fscache_stat(&fscache_n_allocs);
40621+ fscache_stat_unchecked(&fscache_n_allocs);
40622
40623 if (hlist_empty(&cookie->backing_objects))
40624 goto nobufs;
40625@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
40626 goto nobufs_unlock;
40627 spin_unlock(&cookie->lock);
40628
40629- fscache_stat(&fscache_n_alloc_ops);
40630+ fscache_stat_unchecked(&fscache_n_alloc_ops);
40631
40632 ret = fscache_wait_for_retrieval_activation(
40633 object, op,
40634@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
40635
40636 error:
40637 if (ret == -ERESTARTSYS)
40638- fscache_stat(&fscache_n_allocs_intr);
40639+ fscache_stat_unchecked(&fscache_n_allocs_intr);
40640 else if (ret < 0)
40641- fscache_stat(&fscache_n_allocs_nobufs);
40642+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40643 else
40644- fscache_stat(&fscache_n_allocs_ok);
40645+ fscache_stat_unchecked(&fscache_n_allocs_ok);
40646
40647 fscache_put_retrieval(op);
40648 _leave(" = %d", ret);
40649@@ -625,7 +625,7 @@ nobufs_unlock:
40650 spin_unlock(&cookie->lock);
40651 kfree(op);
40652 nobufs:
40653- fscache_stat(&fscache_n_allocs_nobufs);
40654+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40655 _leave(" = -ENOBUFS");
40656 return -ENOBUFS;
40657 }
40658@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
40659
40660 spin_lock(&cookie->stores_lock);
40661
40662- fscache_stat(&fscache_n_store_calls);
40663+ fscache_stat_unchecked(&fscache_n_store_calls);
40664
40665 /* find a page to store */
40666 page = NULL;
40667@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
40668 page = results[0];
40669 _debug("gang %d [%lx]", n, page->index);
40670 if (page->index > op->store_limit) {
40671- fscache_stat(&fscache_n_store_pages_over_limit);
40672+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40673 goto superseded;
40674 }
40675
40676@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
40677 spin_unlock(&cookie->stores_lock);
40678 spin_unlock(&object->lock);
40679
40680- fscache_stat(&fscache_n_store_pages);
40681+ fscache_stat_unchecked(&fscache_n_store_pages);
40682 fscache_stat(&fscache_n_cop_write_page);
40683 ret = object->cache->ops->write_page(op, page);
40684 fscache_stat_d(&fscache_n_cop_write_page);
40685@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
40686 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40687 ASSERT(PageFsCache(page));
40688
40689- fscache_stat(&fscache_n_stores);
40690+ fscache_stat_unchecked(&fscache_n_stores);
40691
40692 op = kzalloc(sizeof(*op), GFP_NOIO);
40693 if (!op)
40694@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
40695 spin_unlock(&cookie->stores_lock);
40696 spin_unlock(&object->lock);
40697
40698- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40699+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40700 op->store_limit = object->store_limit;
40701
40702 if (fscache_submit_op(object, &op->op) < 0)
40703@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
40704
40705 spin_unlock(&cookie->lock);
40706 radix_tree_preload_end();
40707- fscache_stat(&fscache_n_store_ops);
40708- fscache_stat(&fscache_n_stores_ok);
40709+ fscache_stat_unchecked(&fscache_n_store_ops);
40710+ fscache_stat_unchecked(&fscache_n_stores_ok);
40711
40712 /* the work queue now carries its own ref on the object */
40713 fscache_put_operation(&op->op);
40714@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
40715 return 0;
40716
40717 already_queued:
40718- fscache_stat(&fscache_n_stores_again);
40719+ fscache_stat_unchecked(&fscache_n_stores_again);
40720 already_pending:
40721 spin_unlock(&cookie->stores_lock);
40722 spin_unlock(&object->lock);
40723 spin_unlock(&cookie->lock);
40724 radix_tree_preload_end();
40725 kfree(op);
40726- fscache_stat(&fscache_n_stores_ok);
40727+ fscache_stat_unchecked(&fscache_n_stores_ok);
40728 _leave(" = 0");
40729 return 0;
40730
40731@@ -851,14 +851,14 @@ nobufs:
40732 spin_unlock(&cookie->lock);
40733 radix_tree_preload_end();
40734 kfree(op);
40735- fscache_stat(&fscache_n_stores_nobufs);
40736+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
40737 _leave(" = -ENOBUFS");
40738 return -ENOBUFS;
40739
40740 nomem_free:
40741 kfree(op);
40742 nomem:
40743- fscache_stat(&fscache_n_stores_oom);
40744+ fscache_stat_unchecked(&fscache_n_stores_oom);
40745 _leave(" = -ENOMEM");
40746 return -ENOMEM;
40747 }
40748@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
40749 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40750 ASSERTCMP(page, !=, NULL);
40751
40752- fscache_stat(&fscache_n_uncaches);
40753+ fscache_stat_unchecked(&fscache_n_uncaches);
40754
40755 /* cache withdrawal may beat us to it */
40756 if (!PageFsCache(page))
40757@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
40758 unsigned long loop;
40759
40760 #ifdef CONFIG_FSCACHE_STATS
40761- atomic_add(pagevec->nr, &fscache_n_marks);
40762+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40763 #endif
40764
40765 for (loop = 0; loop < pagevec->nr; loop++) {
40766diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
40767--- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
40768+++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
40769@@ -18,95 +18,95 @@
40770 /*
40771 * operation counters
40772 */
40773-atomic_t fscache_n_op_pend;
40774-atomic_t fscache_n_op_run;
40775-atomic_t fscache_n_op_enqueue;
40776-atomic_t fscache_n_op_requeue;
40777-atomic_t fscache_n_op_deferred_release;
40778-atomic_t fscache_n_op_release;
40779-atomic_t fscache_n_op_gc;
40780-atomic_t fscache_n_op_cancelled;
40781-atomic_t fscache_n_op_rejected;
40782-
40783-atomic_t fscache_n_attr_changed;
40784-atomic_t fscache_n_attr_changed_ok;
40785-atomic_t fscache_n_attr_changed_nobufs;
40786-atomic_t fscache_n_attr_changed_nomem;
40787-atomic_t fscache_n_attr_changed_calls;
40788-
40789-atomic_t fscache_n_allocs;
40790-atomic_t fscache_n_allocs_ok;
40791-atomic_t fscache_n_allocs_wait;
40792-atomic_t fscache_n_allocs_nobufs;
40793-atomic_t fscache_n_allocs_intr;
40794-atomic_t fscache_n_allocs_object_dead;
40795-atomic_t fscache_n_alloc_ops;
40796-atomic_t fscache_n_alloc_op_waits;
40797-
40798-atomic_t fscache_n_retrievals;
40799-atomic_t fscache_n_retrievals_ok;
40800-atomic_t fscache_n_retrievals_wait;
40801-atomic_t fscache_n_retrievals_nodata;
40802-atomic_t fscache_n_retrievals_nobufs;
40803-atomic_t fscache_n_retrievals_intr;
40804-atomic_t fscache_n_retrievals_nomem;
40805-atomic_t fscache_n_retrievals_object_dead;
40806-atomic_t fscache_n_retrieval_ops;
40807-atomic_t fscache_n_retrieval_op_waits;
40808-
40809-atomic_t fscache_n_stores;
40810-atomic_t fscache_n_stores_ok;
40811-atomic_t fscache_n_stores_again;
40812-atomic_t fscache_n_stores_nobufs;
40813-atomic_t fscache_n_stores_oom;
40814-atomic_t fscache_n_store_ops;
40815-atomic_t fscache_n_store_calls;
40816-atomic_t fscache_n_store_pages;
40817-atomic_t fscache_n_store_radix_deletes;
40818-atomic_t fscache_n_store_pages_over_limit;
40819-
40820-atomic_t fscache_n_store_vmscan_not_storing;
40821-atomic_t fscache_n_store_vmscan_gone;
40822-atomic_t fscache_n_store_vmscan_busy;
40823-atomic_t fscache_n_store_vmscan_cancelled;
40824-
40825-atomic_t fscache_n_marks;
40826-atomic_t fscache_n_uncaches;
40827-
40828-atomic_t fscache_n_acquires;
40829-atomic_t fscache_n_acquires_null;
40830-atomic_t fscache_n_acquires_no_cache;
40831-atomic_t fscache_n_acquires_ok;
40832-atomic_t fscache_n_acquires_nobufs;
40833-atomic_t fscache_n_acquires_oom;
40834-
40835-atomic_t fscache_n_updates;
40836-atomic_t fscache_n_updates_null;
40837-atomic_t fscache_n_updates_run;
40838-
40839-atomic_t fscache_n_relinquishes;
40840-atomic_t fscache_n_relinquishes_null;
40841-atomic_t fscache_n_relinquishes_waitcrt;
40842-atomic_t fscache_n_relinquishes_retire;
40843-
40844-atomic_t fscache_n_cookie_index;
40845-atomic_t fscache_n_cookie_data;
40846-atomic_t fscache_n_cookie_special;
40847-
40848-atomic_t fscache_n_object_alloc;
40849-atomic_t fscache_n_object_no_alloc;
40850-atomic_t fscache_n_object_lookups;
40851-atomic_t fscache_n_object_lookups_negative;
40852-atomic_t fscache_n_object_lookups_positive;
40853-atomic_t fscache_n_object_lookups_timed_out;
40854-atomic_t fscache_n_object_created;
40855-atomic_t fscache_n_object_avail;
40856-atomic_t fscache_n_object_dead;
40857-
40858-atomic_t fscache_n_checkaux_none;
40859-atomic_t fscache_n_checkaux_okay;
40860-atomic_t fscache_n_checkaux_update;
40861-atomic_t fscache_n_checkaux_obsolete;
40862+atomic_unchecked_t fscache_n_op_pend;
40863+atomic_unchecked_t fscache_n_op_run;
40864+atomic_unchecked_t fscache_n_op_enqueue;
40865+atomic_unchecked_t fscache_n_op_requeue;
40866+atomic_unchecked_t fscache_n_op_deferred_release;
40867+atomic_unchecked_t fscache_n_op_release;
40868+atomic_unchecked_t fscache_n_op_gc;
40869+atomic_unchecked_t fscache_n_op_cancelled;
40870+atomic_unchecked_t fscache_n_op_rejected;
40871+
40872+atomic_unchecked_t fscache_n_attr_changed;
40873+atomic_unchecked_t fscache_n_attr_changed_ok;
40874+atomic_unchecked_t fscache_n_attr_changed_nobufs;
40875+atomic_unchecked_t fscache_n_attr_changed_nomem;
40876+atomic_unchecked_t fscache_n_attr_changed_calls;
40877+
40878+atomic_unchecked_t fscache_n_allocs;
40879+atomic_unchecked_t fscache_n_allocs_ok;
40880+atomic_unchecked_t fscache_n_allocs_wait;
40881+atomic_unchecked_t fscache_n_allocs_nobufs;
40882+atomic_unchecked_t fscache_n_allocs_intr;
40883+atomic_unchecked_t fscache_n_allocs_object_dead;
40884+atomic_unchecked_t fscache_n_alloc_ops;
40885+atomic_unchecked_t fscache_n_alloc_op_waits;
40886+
40887+atomic_unchecked_t fscache_n_retrievals;
40888+atomic_unchecked_t fscache_n_retrievals_ok;
40889+atomic_unchecked_t fscache_n_retrievals_wait;
40890+atomic_unchecked_t fscache_n_retrievals_nodata;
40891+atomic_unchecked_t fscache_n_retrievals_nobufs;
40892+atomic_unchecked_t fscache_n_retrievals_intr;
40893+atomic_unchecked_t fscache_n_retrievals_nomem;
40894+atomic_unchecked_t fscache_n_retrievals_object_dead;
40895+atomic_unchecked_t fscache_n_retrieval_ops;
40896+atomic_unchecked_t fscache_n_retrieval_op_waits;
40897+
40898+atomic_unchecked_t fscache_n_stores;
40899+atomic_unchecked_t fscache_n_stores_ok;
40900+atomic_unchecked_t fscache_n_stores_again;
40901+atomic_unchecked_t fscache_n_stores_nobufs;
40902+atomic_unchecked_t fscache_n_stores_oom;
40903+atomic_unchecked_t fscache_n_store_ops;
40904+atomic_unchecked_t fscache_n_store_calls;
40905+atomic_unchecked_t fscache_n_store_pages;
40906+atomic_unchecked_t fscache_n_store_radix_deletes;
40907+atomic_unchecked_t fscache_n_store_pages_over_limit;
40908+
40909+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40910+atomic_unchecked_t fscache_n_store_vmscan_gone;
40911+atomic_unchecked_t fscache_n_store_vmscan_busy;
40912+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40913+
40914+atomic_unchecked_t fscache_n_marks;
40915+atomic_unchecked_t fscache_n_uncaches;
40916+
40917+atomic_unchecked_t fscache_n_acquires;
40918+atomic_unchecked_t fscache_n_acquires_null;
40919+atomic_unchecked_t fscache_n_acquires_no_cache;
40920+atomic_unchecked_t fscache_n_acquires_ok;
40921+atomic_unchecked_t fscache_n_acquires_nobufs;
40922+atomic_unchecked_t fscache_n_acquires_oom;
40923+
40924+atomic_unchecked_t fscache_n_updates;
40925+atomic_unchecked_t fscache_n_updates_null;
40926+atomic_unchecked_t fscache_n_updates_run;
40927+
40928+atomic_unchecked_t fscache_n_relinquishes;
40929+atomic_unchecked_t fscache_n_relinquishes_null;
40930+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40931+atomic_unchecked_t fscache_n_relinquishes_retire;
40932+
40933+atomic_unchecked_t fscache_n_cookie_index;
40934+atomic_unchecked_t fscache_n_cookie_data;
40935+atomic_unchecked_t fscache_n_cookie_special;
40936+
40937+atomic_unchecked_t fscache_n_object_alloc;
40938+atomic_unchecked_t fscache_n_object_no_alloc;
40939+atomic_unchecked_t fscache_n_object_lookups;
40940+atomic_unchecked_t fscache_n_object_lookups_negative;
40941+atomic_unchecked_t fscache_n_object_lookups_positive;
40942+atomic_unchecked_t fscache_n_object_lookups_timed_out;
40943+atomic_unchecked_t fscache_n_object_created;
40944+atomic_unchecked_t fscache_n_object_avail;
40945+atomic_unchecked_t fscache_n_object_dead;
40946+
40947+atomic_unchecked_t fscache_n_checkaux_none;
40948+atomic_unchecked_t fscache_n_checkaux_okay;
40949+atomic_unchecked_t fscache_n_checkaux_update;
40950+atomic_unchecked_t fscache_n_checkaux_obsolete;
40951
40952 atomic_t fscache_n_cop_alloc_object;
40953 atomic_t fscache_n_cop_lookup_object;
40954@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40955 seq_puts(m, "FS-Cache statistics\n");
40956
40957 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40958- atomic_read(&fscache_n_cookie_index),
40959- atomic_read(&fscache_n_cookie_data),
40960- atomic_read(&fscache_n_cookie_special));
40961+ atomic_read_unchecked(&fscache_n_cookie_index),
40962+ atomic_read_unchecked(&fscache_n_cookie_data),
40963+ atomic_read_unchecked(&fscache_n_cookie_special));
40964
40965 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40966- atomic_read(&fscache_n_object_alloc),
40967- atomic_read(&fscache_n_object_no_alloc),
40968- atomic_read(&fscache_n_object_avail),
40969- atomic_read(&fscache_n_object_dead));
40970+ atomic_read_unchecked(&fscache_n_object_alloc),
40971+ atomic_read_unchecked(&fscache_n_object_no_alloc),
40972+ atomic_read_unchecked(&fscache_n_object_avail),
40973+ atomic_read_unchecked(&fscache_n_object_dead));
40974 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40975- atomic_read(&fscache_n_checkaux_none),
40976- atomic_read(&fscache_n_checkaux_okay),
40977- atomic_read(&fscache_n_checkaux_update),
40978- atomic_read(&fscache_n_checkaux_obsolete));
40979+ atomic_read_unchecked(&fscache_n_checkaux_none),
40980+ atomic_read_unchecked(&fscache_n_checkaux_okay),
40981+ atomic_read_unchecked(&fscache_n_checkaux_update),
40982+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40983
40984 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40985- atomic_read(&fscache_n_marks),
40986- atomic_read(&fscache_n_uncaches));
40987+ atomic_read_unchecked(&fscache_n_marks),
40988+ atomic_read_unchecked(&fscache_n_uncaches));
40989
40990 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40991 " oom=%u\n",
40992- atomic_read(&fscache_n_acquires),
40993- atomic_read(&fscache_n_acquires_null),
40994- atomic_read(&fscache_n_acquires_no_cache),
40995- atomic_read(&fscache_n_acquires_ok),
40996- atomic_read(&fscache_n_acquires_nobufs),
40997- atomic_read(&fscache_n_acquires_oom));
40998+ atomic_read_unchecked(&fscache_n_acquires),
40999+ atomic_read_unchecked(&fscache_n_acquires_null),
41000+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
41001+ atomic_read_unchecked(&fscache_n_acquires_ok),
41002+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
41003+ atomic_read_unchecked(&fscache_n_acquires_oom));
41004
41005 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
41006- atomic_read(&fscache_n_object_lookups),
41007- atomic_read(&fscache_n_object_lookups_negative),
41008- atomic_read(&fscache_n_object_lookups_positive),
41009- atomic_read(&fscache_n_object_created),
41010- atomic_read(&fscache_n_object_lookups_timed_out));
41011+ atomic_read_unchecked(&fscache_n_object_lookups),
41012+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
41013+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
41014+ atomic_read_unchecked(&fscache_n_object_created),
41015+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
41016
41017 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
41018- atomic_read(&fscache_n_updates),
41019- atomic_read(&fscache_n_updates_null),
41020- atomic_read(&fscache_n_updates_run));
41021+ atomic_read_unchecked(&fscache_n_updates),
41022+ atomic_read_unchecked(&fscache_n_updates_null),
41023+ atomic_read_unchecked(&fscache_n_updates_run));
41024
41025 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
41026- atomic_read(&fscache_n_relinquishes),
41027- atomic_read(&fscache_n_relinquishes_null),
41028- atomic_read(&fscache_n_relinquishes_waitcrt),
41029- atomic_read(&fscache_n_relinquishes_retire));
41030+ atomic_read_unchecked(&fscache_n_relinquishes),
41031+ atomic_read_unchecked(&fscache_n_relinquishes_null),
41032+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
41033+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
41034
41035 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
41036- atomic_read(&fscache_n_attr_changed),
41037- atomic_read(&fscache_n_attr_changed_ok),
41038- atomic_read(&fscache_n_attr_changed_nobufs),
41039- atomic_read(&fscache_n_attr_changed_nomem),
41040- atomic_read(&fscache_n_attr_changed_calls));
41041+ atomic_read_unchecked(&fscache_n_attr_changed),
41042+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
41043+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
41044+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
41045+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
41046
41047 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
41048- atomic_read(&fscache_n_allocs),
41049- atomic_read(&fscache_n_allocs_ok),
41050- atomic_read(&fscache_n_allocs_wait),
41051- atomic_read(&fscache_n_allocs_nobufs),
41052- atomic_read(&fscache_n_allocs_intr));
41053+ atomic_read_unchecked(&fscache_n_allocs),
41054+ atomic_read_unchecked(&fscache_n_allocs_ok),
41055+ atomic_read_unchecked(&fscache_n_allocs_wait),
41056+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
41057+ atomic_read_unchecked(&fscache_n_allocs_intr));
41058 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
41059- atomic_read(&fscache_n_alloc_ops),
41060- atomic_read(&fscache_n_alloc_op_waits),
41061- atomic_read(&fscache_n_allocs_object_dead));
41062+ atomic_read_unchecked(&fscache_n_alloc_ops),
41063+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
41064+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
41065
41066 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
41067 " int=%u oom=%u\n",
41068- atomic_read(&fscache_n_retrievals),
41069- atomic_read(&fscache_n_retrievals_ok),
41070- atomic_read(&fscache_n_retrievals_wait),
41071- atomic_read(&fscache_n_retrievals_nodata),
41072- atomic_read(&fscache_n_retrievals_nobufs),
41073- atomic_read(&fscache_n_retrievals_intr),
41074- atomic_read(&fscache_n_retrievals_nomem));
41075+ atomic_read_unchecked(&fscache_n_retrievals),
41076+ atomic_read_unchecked(&fscache_n_retrievals_ok),
41077+ atomic_read_unchecked(&fscache_n_retrievals_wait),
41078+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
41079+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
41080+ atomic_read_unchecked(&fscache_n_retrievals_intr),
41081+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
41082 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
41083- atomic_read(&fscache_n_retrieval_ops),
41084- atomic_read(&fscache_n_retrieval_op_waits),
41085- atomic_read(&fscache_n_retrievals_object_dead));
41086+ atomic_read_unchecked(&fscache_n_retrieval_ops),
41087+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
41088+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
41089
41090 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
41091- atomic_read(&fscache_n_stores),
41092- atomic_read(&fscache_n_stores_ok),
41093- atomic_read(&fscache_n_stores_again),
41094- atomic_read(&fscache_n_stores_nobufs),
41095- atomic_read(&fscache_n_stores_oom));
41096+ atomic_read_unchecked(&fscache_n_stores),
41097+ atomic_read_unchecked(&fscache_n_stores_ok),
41098+ atomic_read_unchecked(&fscache_n_stores_again),
41099+ atomic_read_unchecked(&fscache_n_stores_nobufs),
41100+ atomic_read_unchecked(&fscache_n_stores_oom));
41101 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
41102- atomic_read(&fscache_n_store_ops),
41103- atomic_read(&fscache_n_store_calls),
41104- atomic_read(&fscache_n_store_pages),
41105- atomic_read(&fscache_n_store_radix_deletes),
41106- atomic_read(&fscache_n_store_pages_over_limit));
41107+ atomic_read_unchecked(&fscache_n_store_ops),
41108+ atomic_read_unchecked(&fscache_n_store_calls),
41109+ atomic_read_unchecked(&fscache_n_store_pages),
41110+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
41111+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
41112
41113 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
41114- atomic_read(&fscache_n_store_vmscan_not_storing),
41115- atomic_read(&fscache_n_store_vmscan_gone),
41116- atomic_read(&fscache_n_store_vmscan_busy),
41117- atomic_read(&fscache_n_store_vmscan_cancelled));
41118+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
41119+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
41120+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
41121+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
41122
41123 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
41124- atomic_read(&fscache_n_op_pend),
41125- atomic_read(&fscache_n_op_run),
41126- atomic_read(&fscache_n_op_enqueue),
41127- atomic_read(&fscache_n_op_cancelled),
41128- atomic_read(&fscache_n_op_rejected));
41129+ atomic_read_unchecked(&fscache_n_op_pend),
41130+ atomic_read_unchecked(&fscache_n_op_run),
41131+ atomic_read_unchecked(&fscache_n_op_enqueue),
41132+ atomic_read_unchecked(&fscache_n_op_cancelled),
41133+ atomic_read_unchecked(&fscache_n_op_rejected));
41134 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
41135- atomic_read(&fscache_n_op_deferred_release),
41136- atomic_read(&fscache_n_op_release),
41137- atomic_read(&fscache_n_op_gc));
41138+ atomic_read_unchecked(&fscache_n_op_deferred_release),
41139+ atomic_read_unchecked(&fscache_n_op_release),
41140+ atomic_read_unchecked(&fscache_n_op_gc));
41141
41142 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
41143 atomic_read(&fscache_n_cop_alloc_object),
41144diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
41145--- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
41146+++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
41147@@ -4,6 +4,7 @@
41148 #include <linux/path.h>
41149 #include <linux/slab.h>
41150 #include <linux/fs_struct.h>
41151+#include <linux/grsecurity.h>
41152 #include "internal.h"
41153
41154 static inline void path_get_longterm(struct path *path)
41155@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
41156 old_root = fs->root;
41157 fs->root = *path;
41158 path_get_longterm(path);
41159+ gr_set_chroot_entries(current, path);
41160 write_seqcount_end(&fs->seq);
41161 spin_unlock(&fs->lock);
41162 if (old_root.dentry)
41163@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
41164 && fs->root.mnt == old_root->mnt) {
41165 path_get_longterm(new_root);
41166 fs->root = *new_root;
41167+ gr_set_chroot_entries(p, new_root);
41168 count++;
41169 }
41170 if (fs->pwd.dentry == old_root->dentry
41171@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
41172 spin_lock(&fs->lock);
41173 write_seqcount_begin(&fs->seq);
41174 tsk->fs = NULL;
41175- kill = !--fs->users;
41176+ gr_clear_chroot_entries(tsk);
41177+ kill = !atomic_dec_return(&fs->users);
41178 write_seqcount_end(&fs->seq);
41179 spin_unlock(&fs->lock);
41180 task_unlock(tsk);
41181@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
41182 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
41183 /* We don't need to lock fs - think why ;-) */
41184 if (fs) {
41185- fs->users = 1;
41186+ atomic_set(&fs->users, 1);
41187 fs->in_exec = 0;
41188 spin_lock_init(&fs->lock);
41189 seqcount_init(&fs->seq);
41190@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
41191 spin_lock(&old->lock);
41192 fs->root = old->root;
41193 path_get_longterm(&fs->root);
41194+ /* instead of calling gr_set_chroot_entries here,
41195+ we call it from every caller of this function
41196+ */
41197 fs->pwd = old->pwd;
41198 path_get_longterm(&fs->pwd);
41199 spin_unlock(&old->lock);
41200@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
41201
41202 task_lock(current);
41203 spin_lock(&fs->lock);
41204- kill = !--fs->users;
41205+ kill = !atomic_dec_return(&fs->users);
41206 current->fs = new_fs;
41207+ gr_set_chroot_entries(current, &new_fs->root);
41208 spin_unlock(&fs->lock);
41209 task_unlock(current);
41210
41211@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
41212
41213 /* to be mentioned only in INIT_TASK */
41214 struct fs_struct init_fs = {
41215- .users = 1,
41216+ .users = ATOMIC_INIT(1),
41217 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
41218 .seq = SEQCNT_ZERO,
41219 .umask = 0022,
41220@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
41221 task_lock(current);
41222
41223 spin_lock(&init_fs.lock);
41224- init_fs.users++;
41225+ atomic_inc(&init_fs.users);
41226 spin_unlock(&init_fs.lock);
41227
41228 spin_lock(&fs->lock);
41229 current->fs = &init_fs;
41230- kill = !--fs->users;
41231+ gr_set_chroot_entries(current, &current->fs->root);
41232+ kill = !atomic_dec_return(&fs->users);
41233 spin_unlock(&fs->lock);
41234
41235 task_unlock(current);
41236diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
41237--- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
41238+++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
41239@@ -586,10 +586,12 @@ static int __init cuse_init(void)
41240 INIT_LIST_HEAD(&cuse_conntbl[i]);
41241
41242 /* inherit and extend fuse_dev_operations */
41243- cuse_channel_fops = fuse_dev_operations;
41244- cuse_channel_fops.owner = THIS_MODULE;
41245- cuse_channel_fops.open = cuse_channel_open;
41246- cuse_channel_fops.release = cuse_channel_release;
41247+ pax_open_kernel();
41248+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
41249+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
41250+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
41251+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
41252+ pax_close_kernel();
41253
41254 cuse_class = class_create(THIS_MODULE, "cuse");
41255 if (IS_ERR(cuse_class))
41256diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
41257--- linux-3.0.4/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
41258+++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
41259@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
41260 ret = 0;
41261 pipe_lock(pipe);
41262
41263- if (!pipe->readers) {
41264+ if (!atomic_read(&pipe->readers)) {
41265 send_sig(SIGPIPE, current, 0);
41266 if (!ret)
41267 ret = -EPIPE;
41268diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
41269--- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
41270+++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
41271@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
41272 return link;
41273 }
41274
41275-static void free_link(char *link)
41276+static void free_link(const char *link)
41277 {
41278 if (!IS_ERR(link))
41279 free_page((unsigned long) link);
41280diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
41281--- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
41282+++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
41283@@ -1525,7 +1525,7 @@ out:
41284
41285 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41286 {
41287- char *s = nd_get_link(nd);
41288+ const char *s = nd_get_link(nd);
41289 if (!IS_ERR(s))
41290 kfree(s);
41291 }
41292diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
41293--- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
41294+++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
41295@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
41296 int err;
41297 u16 type;
41298
41299+ pax_track_stack();
41300+
41301 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
41302 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
41303 if (err)
41304@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
41305 int entry_size;
41306 int err;
41307
41308+ pax_track_stack();
41309+
41310 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
41311 str->name, cnid, inode->i_nlink);
41312 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
41313@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
41314 int entry_size, type;
41315 int err = 0;
41316
41317+ pax_track_stack();
41318+
41319 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
41320 cnid, src_dir->i_ino, src_name->name,
41321 dst_dir->i_ino, dst_name->name);
41322diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
41323--- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
41324+++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
41325@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
41326 struct hfsplus_readdir_data *rd;
41327 u16 type;
41328
41329+ pax_track_stack();
41330+
41331 if (filp->f_pos >= inode->i_size)
41332 return 0;
41333
41334diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
41335--- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
41336+++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
41337@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
41338 int res = 0;
41339 u16 type;
41340
41341+ pax_track_stack();
41342+
41343 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
41344
41345 HFSPLUS_I(inode)->linkid = 0;
41346@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
41347 struct hfs_find_data fd;
41348 hfsplus_cat_entry entry;
41349
41350+ pax_track_stack();
41351+
41352 if (HFSPLUS_IS_RSRC(inode))
41353 main_inode = HFSPLUS_I(inode)->rsrc_inode;
41354
41355diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
41356--- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41357+++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
41358@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
41359 struct hfsplus_cat_file *file;
41360 int res;
41361
41362+ pax_track_stack();
41363+
41364 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
41365 return -EOPNOTSUPP;
41366
41367@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
41368 struct hfsplus_cat_file *file;
41369 ssize_t res = 0;
41370
41371+ pax_track_stack();
41372+
41373 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
41374 return -EOPNOTSUPP;
41375
41376diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
41377--- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
41378+++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
41379@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
41380 struct nls_table *nls = NULL;
41381 int err;
41382
41383+ pax_track_stack();
41384+
41385 err = -EINVAL;
41386 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
41387 if (!sbi)
41388diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
41389--- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41390+++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
41391@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
41392 .kill_sb = kill_litter_super,
41393 };
41394
41395-static struct vfsmount *hugetlbfs_vfsmount;
41396+struct vfsmount *hugetlbfs_vfsmount;
41397
41398 static int can_do_hugetlb_shm(void)
41399 {
41400diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
41401--- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
41402+++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
41403@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
41404
41405 #ifdef CONFIG_SMP
41406 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
41407- static atomic_t shared_last_ino;
41408- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
41409+ static atomic_unchecked_t shared_last_ino;
41410+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
41411
41412 res = next - LAST_INO_BATCH;
41413 }
41414diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
41415--- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
41416+++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
41417@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
41418 tid_t this_tid;
41419 int result;
41420
41421+ pax_track_stack();
41422+
41423 jbd_debug(1, "Start checkpoint\n");
41424
41425 /*
41426diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
41427--- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
41428+++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
41429@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
41430 int outpos = 0;
41431 int pos=0;
41432
41433+ pax_track_stack();
41434+
41435 memset(positions,0,sizeof(positions));
41436
41437 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
41438@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
41439 int outpos = 0;
41440 int pos=0;
41441
41442+ pax_track_stack();
41443+
41444 memset(positions,0,sizeof(positions));
41445
41446 while (outpos<destlen) {
41447diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
41448--- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
41449+++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
41450@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
41451 int ret;
41452 uint32_t mysrclen, mydstlen;
41453
41454+ pax_track_stack();
41455+
41456 mysrclen = *sourcelen;
41457 mydstlen = *dstlen - 8;
41458
41459diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
41460--- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
41461+++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
41462@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
41463 struct jffs2_unknown_node marker = {
41464 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
41465 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41466- .totlen = cpu_to_je32(c->cleanmarker_size)
41467+ .totlen = cpu_to_je32(c->cleanmarker_size),
41468+ .hdr_crc = cpu_to_je32(0)
41469 };
41470
41471 jffs2_prealloc_raw_node_refs(c, jeb, 1);
41472diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
41473--- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
41474+++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
41475@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
41476 {
41477 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
41478 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41479- .totlen = constant_cpu_to_je32(8)
41480+ .totlen = constant_cpu_to_je32(8),
41481+ .hdr_crc = constant_cpu_to_je32(0)
41482 };
41483
41484 /*
41485diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
41486--- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
41487+++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
41488@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
41489
41490 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
41491
41492+ pax_track_stack();
41493+
41494 /* Phase.1 : Merge same xref */
41495 for (i=0; i < XREF_TMPHASH_SIZE; i++)
41496 xref_tmphash[i] = NULL;
41497diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
41498--- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
41499+++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
41500@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
41501
41502 jfs_inode_cachep =
41503 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
41504- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
41505+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
41506 init_once);
41507 if (jfs_inode_cachep == NULL)
41508 return -ENOMEM;
41509diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
41510--- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
41511+++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
41512@@ -86,7 +86,7 @@ config HAVE_AOUT
41513
41514 config BINFMT_AOUT
41515 tristate "Kernel support for a.out and ECOFF binaries"
41516- depends on HAVE_AOUT
41517+ depends on HAVE_AOUT && BROKEN
41518 ---help---
41519 A.out (Assembler.OUTput) is a set of formats for libraries and
41520 executables used in the earliest versions of UNIX. Linux used
41521diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
41522--- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
41523+++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
41524@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
41525
41526 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
41527 struct dentry *next;
41528+ char d_name[sizeof(next->d_iname)];
41529+ const unsigned char *name;
41530+
41531 next = list_entry(p, struct dentry, d_u.d_child);
41532 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
41533 if (!simple_positive(next)) {
41534@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
41535
41536 spin_unlock(&next->d_lock);
41537 spin_unlock(&dentry->d_lock);
41538- if (filldir(dirent, next->d_name.name,
41539+ name = next->d_name.name;
41540+ if (name == next->d_iname) {
41541+ memcpy(d_name, name, next->d_name.len);
41542+ name = d_name;
41543+ }
41544+ if (filldir(dirent, name,
41545 next->d_name.len, filp->f_pos,
41546 next->d_inode->i_ino,
41547 dt_type(next->d_inode)) < 0)
41548diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
41549--- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
41550+++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
41551@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41552 /*
41553 * Cookie counter for NLM requests
41554 */
41555-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41556+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41557
41558 void nlmclnt_next_cookie(struct nlm_cookie *c)
41559 {
41560- u32 cookie = atomic_inc_return(&nlm_cookie);
41561+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41562
41563 memcpy(c->data, &cookie, 4);
41564 c->len=4;
41565@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41566 struct nlm_rqst reqst, *req;
41567 int status;
41568
41569+ pax_track_stack();
41570+
41571 req = &reqst;
41572 memset(req, 0, sizeof(*req));
41573 locks_init_lock(&req->a_args.lock.fl);
41574diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
41575--- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
41576+++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
41577@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
41578 return;
41579
41580 if (filp->f_op && filp->f_op->flock) {
41581- struct file_lock fl = {
41582+ struct file_lock flock = {
41583 .fl_pid = current->tgid,
41584 .fl_file = filp,
41585 .fl_flags = FL_FLOCK,
41586 .fl_type = F_UNLCK,
41587 .fl_end = OFFSET_MAX,
41588 };
41589- filp->f_op->flock(filp, F_SETLKW, &fl);
41590- if (fl.fl_ops && fl.fl_ops->fl_release_private)
41591- fl.fl_ops->fl_release_private(&fl);
41592+ filp->f_op->flock(filp, F_SETLKW, &flock);
41593+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
41594+ flock.fl_ops->fl_release_private(&flock);
41595 }
41596
41597 lock_flocks();
41598diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
41599--- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
41600+++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
41601@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
41602 struct logfs_disk_super _ds1, *ds1 = &_ds1;
41603 int err, valid0, valid1;
41604
41605+ pax_track_stack();
41606+
41607 /* read first superblock */
41608 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
41609 if (err)
41610diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
41611--- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
41612+++ linux-3.0.4/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
41613@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
41614 return ret;
41615
41616 /*
41617- * Read/write DACs are always overridable.
41618- * Executable DACs are overridable for all directories and
41619- * for non-directories that have least one exec bit set.
41620+ * Searching includes executable on directories, else just read.
41621 */
41622- if (!(mask & MAY_EXEC) || execute_ok(inode))
41623- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41624+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41625+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
41626+#ifdef CONFIG_GRKERNSEC
41627+ if (flags & IPERM_FLAG_RCU)
41628+ return -ECHILD;
41629+#endif
41630+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41631 return 0;
41632+ }
41633
41634 /*
41635- * Searching includes executable on directories, else just read.
41636+ * Read/write DACs are always overridable.
41637+ * Executable DACs are overridable for all directories and
41638+ * for non-directories that have least one exec bit set.
41639 */
41640- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41641- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
41642- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41643+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
41644+#ifdef CONFIG_GRKERNSEC
41645+ if (flags & IPERM_FLAG_RCU)
41646+ return -ECHILD;
41647+#endif
41648+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41649 return 0;
41650+ }
41651
41652 return -EACCES;
41653 }
41654@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
41655 br_read_unlock(vfsmount_lock);
41656 }
41657
41658+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41659+ return -ENOENT;
41660+
41661 if (likely(!(nd->flags & LOOKUP_JUMPED)))
41662 return 0;
41663
41664@@ -593,9 +606,16 @@ static inline int exec_permission(struct
41665 if (ret == -ECHILD)
41666 return ret;
41667
41668- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
41669- ns_capable(ns, CAP_DAC_READ_SEARCH))
41670+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
41671 goto ok;
41672+ else {
41673+#ifdef CONFIG_GRKERNSEC
41674+ if (flags & IPERM_FLAG_RCU)
41675+ return -ECHILD;
41676+#endif
41677+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
41678+ goto ok;
41679+ }
41680
41681 return ret;
41682 ok:
41683@@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
41684 return error;
41685 }
41686
41687+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
41688+ dentry->d_inode, dentry, nd->path.mnt)) {
41689+ error = -EACCES;
41690+ *p = ERR_PTR(error); /* no ->put_link(), please */
41691+ path_put(&nd->path);
41692+ return error;
41693+ }
41694+
41695 nd->last_type = LAST_BIND;
41696 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
41697 error = PTR_ERR(*p);
41698 if (!IS_ERR(*p)) {
41699- char *s = nd_get_link(nd);
41700+ const char *s = nd_get_link(nd);
41701 error = 0;
41702 if (s)
41703 error = __vfs_follow_link(nd, s);
41704@@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
41705 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
41706
41707 if (likely(!retval)) {
41708+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41709+ return -ENOENT;
41710+
41711 if (unlikely(!audit_dummy_context())) {
41712 if (nd->path.dentry && nd->inode)
41713 audit_inode(name, nd->path.dentry);
41714@@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
41715 return error;
41716 }
41717
41718+/*
41719+ * Note that while the flag value (low two bits) for sys_open means:
41720+ * 00 - read-only
41721+ * 01 - write-only
41722+ * 10 - read-write
41723+ * 11 - special
41724+ * it is changed into
41725+ * 00 - no permissions needed
41726+ * 01 - read-permission
41727+ * 10 - write-permission
41728+ * 11 - read-write
41729+ * for the internal routines (ie open_namei()/follow_link() etc)
41730+ * This is more logical, and also allows the 00 "no perm needed"
41731+ * to be used for symlinks (where the permissions are checked
41732+ * later).
41733+ *
41734+*/
41735+static inline int open_to_namei_flags(int flag)
41736+{
41737+ if ((flag+1) & O_ACCMODE)
41738+ flag++;
41739+ return flag;
41740+}
41741+
41742 static int may_open(struct path *path, int acc_mode, int flag)
41743 {
41744 struct dentry *dentry = path->dentry;
41745@@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
41746 /*
41747 * Ensure there are no outstanding leases on the file.
41748 */
41749- return break_lease(inode, flag);
41750+ error = break_lease(inode, flag);
41751+
41752+ if (error)
41753+ return error;
41754+
41755+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41756+ error = -EPERM;
41757+ goto exit;
41758+ }
41759+
41760+ if (gr_handle_rawio(inode)) {
41761+ error = -EPERM;
41762+ goto exit;
41763+ }
41764+
41765+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
41766+ error = -EACCES;
41767+ goto exit;
41768+ }
41769+exit:
41770+ return error;
41771 }
41772
41773 static int handle_truncate(struct file *filp)
41774@@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
41775 }
41776
41777 /*
41778- * Note that while the flag value (low two bits) for sys_open means:
41779- * 00 - read-only
41780- * 01 - write-only
41781- * 10 - read-write
41782- * 11 - special
41783- * it is changed into
41784- * 00 - no permissions needed
41785- * 01 - read-permission
41786- * 10 - write-permission
41787- * 11 - read-write
41788- * for the internal routines (ie open_namei()/follow_link() etc)
41789- * This is more logical, and also allows the 00 "no perm needed"
41790- * to be used for symlinks (where the permissions are checked
41791- * later).
41792- *
41793-*/
41794-static inline int open_to_namei_flags(int flag)
41795-{
41796- if ((flag+1) & O_ACCMODE)
41797- flag++;
41798- return flag;
41799-}
41800-
41801-/*
41802 * Handle the last step of open()
41803 */
41804 static struct file *do_last(struct nameidata *nd, struct path *path,
41805@@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
41806 struct dentry *dir = nd->path.dentry;
41807 struct dentry *dentry;
41808 int open_flag = op->open_flag;
41809+ int flag = open_to_namei_flags(open_flag);
41810 int will_truncate = open_flag & O_TRUNC;
41811 int want_write = 0;
41812 int acc_mode = op->acc_mode;
41813@@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
41814 /* Negative dentry, just create the file */
41815 if (!dentry->d_inode) {
41816 int mode = op->mode;
41817+
41818+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
41819+ error = -EACCES;
41820+ goto exit_mutex_unlock;
41821+ }
41822+
41823 if (!IS_POSIXACL(dir->d_inode))
41824 mode &= ~current_umask();
41825 /*
41826@@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
41827 error = vfs_create(dir->d_inode, dentry, mode, nd);
41828 if (error)
41829 goto exit_mutex_unlock;
41830+ else
41831+ gr_handle_create(path->dentry, path->mnt);
41832 mutex_unlock(&dir->d_inode->i_mutex);
41833 dput(nd->path.dentry);
41834 nd->path.dentry = dentry;
41835@@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
41836 /*
41837 * It already exists.
41838 */
41839+
41840+ /* only check if O_CREAT is specified, all other checks need to go
41841+ into may_open */
41842+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
41843+ error = -EACCES;
41844+ goto exit_mutex_unlock;
41845+ }
41846+
41847 mutex_unlock(&dir->d_inode->i_mutex);
41848 audit_inode(pathname, path->dentry);
41849
41850@@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41851 error = may_mknod(mode);
41852 if (error)
41853 goto out_dput;
41854+
41855+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41856+ error = -EPERM;
41857+ goto out_dput;
41858+ }
41859+
41860+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41861+ error = -EACCES;
41862+ goto out_dput;
41863+ }
41864+
41865 error = mnt_want_write(nd.path.mnt);
41866 if (error)
41867 goto out_dput;
41868@@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41869 }
41870 out_drop_write:
41871 mnt_drop_write(nd.path.mnt);
41872+
41873+ if (!error)
41874+ gr_handle_create(dentry, nd.path.mnt);
41875 out_dput:
41876 dput(dentry);
41877 out_unlock:
41878@@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41879 if (IS_ERR(dentry))
41880 goto out_unlock;
41881
41882+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41883+ error = -EACCES;
41884+ goto out_dput;
41885+ }
41886+
41887 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41888 mode &= ~current_umask();
41889 error = mnt_want_write(nd.path.mnt);
41890@@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41891 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41892 out_drop_write:
41893 mnt_drop_write(nd.path.mnt);
41894+
41895+ if (!error)
41896+ gr_handle_create(dentry, nd.path.mnt);
41897+
41898 out_dput:
41899 dput(dentry);
41900 out_unlock:
41901@@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
41902 char * name;
41903 struct dentry *dentry;
41904 struct nameidata nd;
41905+ ino_t saved_ino = 0;
41906+ dev_t saved_dev = 0;
41907
41908 error = user_path_parent(dfd, pathname, &nd, &name);
41909 if (error)
41910@@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
41911 error = -ENOENT;
41912 goto exit3;
41913 }
41914+
41915+ if (dentry->d_inode->i_nlink <= 1) {
41916+ saved_ino = dentry->d_inode->i_ino;
41917+ saved_dev = gr_get_dev_from_dentry(dentry);
41918+ }
41919+
41920+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41921+ error = -EACCES;
41922+ goto exit3;
41923+ }
41924+
41925 error = mnt_want_write(nd.path.mnt);
41926 if (error)
41927 goto exit3;
41928@@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
41929 if (error)
41930 goto exit4;
41931 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41932+ if (!error && (saved_dev || saved_ino))
41933+ gr_handle_delete(saved_ino, saved_dev);
41934 exit4:
41935 mnt_drop_write(nd.path.mnt);
41936 exit3:
41937@@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
41938 struct dentry *dentry;
41939 struct nameidata nd;
41940 struct inode *inode = NULL;
41941+ ino_t saved_ino = 0;
41942+ dev_t saved_dev = 0;
41943
41944 error = user_path_parent(dfd, pathname, &nd, &name);
41945 if (error)
41946@@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
41947 if (!inode)
41948 goto slashes;
41949 ihold(inode);
41950+
41951+ if (inode->i_nlink <= 1) {
41952+ saved_ino = inode->i_ino;
41953+ saved_dev = gr_get_dev_from_dentry(dentry);
41954+ }
41955+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41956+ error = -EACCES;
41957+ goto exit2;
41958+ }
41959+
41960 error = mnt_want_write(nd.path.mnt);
41961 if (error)
41962 goto exit2;
41963@@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
41964 if (error)
41965 goto exit3;
41966 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41967+ if (!error && (saved_ino || saved_dev))
41968+ gr_handle_delete(saved_ino, saved_dev);
41969 exit3:
41970 mnt_drop_write(nd.path.mnt);
41971 exit2:
41972@@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41973 if (IS_ERR(dentry))
41974 goto out_unlock;
41975
41976+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41977+ error = -EACCES;
41978+ goto out_dput;
41979+ }
41980+
41981 error = mnt_want_write(nd.path.mnt);
41982 if (error)
41983 goto out_dput;
41984@@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41985 if (error)
41986 goto out_drop_write;
41987 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41988+ if (!error)
41989+ gr_handle_create(dentry, nd.path.mnt);
41990 out_drop_write:
41991 mnt_drop_write(nd.path.mnt);
41992 out_dput:
41993@@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41994 error = PTR_ERR(new_dentry);
41995 if (IS_ERR(new_dentry))
41996 goto out_unlock;
41997+
41998+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41999+ old_path.dentry->d_inode,
42000+ old_path.dentry->d_inode->i_mode, to)) {
42001+ error = -EACCES;
42002+ goto out_dput;
42003+ }
42004+
42005+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
42006+ old_path.dentry, old_path.mnt, to)) {
42007+ error = -EACCES;
42008+ goto out_dput;
42009+ }
42010+
42011 error = mnt_want_write(nd.path.mnt);
42012 if (error)
42013 goto out_dput;
42014@@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
42015 if (error)
42016 goto out_drop_write;
42017 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
42018+ if (!error)
42019+ gr_handle_create(new_dentry, nd.path.mnt);
42020 out_drop_write:
42021 mnt_drop_write(nd.path.mnt);
42022 out_dput:
42023@@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42024 char *to;
42025 int error;
42026
42027+ pax_track_stack();
42028+
42029 error = user_path_parent(olddfd, oldname, &oldnd, &from);
42030 if (error)
42031 goto exit;
42032@@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42033 if (new_dentry == trap)
42034 goto exit5;
42035
42036+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
42037+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
42038+ to);
42039+ if (error)
42040+ goto exit5;
42041+
42042 error = mnt_want_write(oldnd.path.mnt);
42043 if (error)
42044 goto exit5;
42045@@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42046 goto exit6;
42047 error = vfs_rename(old_dir->d_inode, old_dentry,
42048 new_dir->d_inode, new_dentry);
42049+ if (!error)
42050+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
42051+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
42052 exit6:
42053 mnt_drop_write(oldnd.path.mnt);
42054 exit5:
42055@@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
42056
42057 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
42058 {
42059+ char tmpbuf[64];
42060+ const char *newlink;
42061 int len;
42062
42063 len = PTR_ERR(link);
42064@@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
42065 len = strlen(link);
42066 if (len > (unsigned) buflen)
42067 len = buflen;
42068- if (copy_to_user(buffer, link, len))
42069+
42070+ if (len < sizeof(tmpbuf)) {
42071+ memcpy(tmpbuf, link, len);
42072+ newlink = tmpbuf;
42073+ } else
42074+ newlink = link;
42075+
42076+ if (copy_to_user(buffer, newlink, len))
42077 len = -EFAULT;
42078 out:
42079 return len;
42080diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
42081--- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
42082+++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
42083@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
42084 if (!(sb->s_flags & MS_RDONLY))
42085 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
42086 up_write(&sb->s_umount);
42087+
42088+ gr_log_remount(mnt->mnt_devname, retval);
42089+
42090 return retval;
42091 }
42092
42093@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
42094 br_write_unlock(vfsmount_lock);
42095 up_write(&namespace_sem);
42096 release_mounts(&umount_list);
42097+
42098+ gr_log_unmount(mnt->mnt_devname, retval);
42099+
42100 return retval;
42101 }
42102
42103@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
42104 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
42105 MS_STRICTATIME);
42106
42107+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
42108+ retval = -EPERM;
42109+ goto dput_out;
42110+ }
42111+
42112+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
42113+ retval = -EPERM;
42114+ goto dput_out;
42115+ }
42116+
42117 if (flags & MS_REMOUNT)
42118 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
42119 data_page);
42120@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
42121 dev_name, data_page);
42122 dput_out:
42123 path_put(&path);
42124+
42125+ gr_log_mount(dev_name, dir_name, retval);
42126+
42127 return retval;
42128 }
42129
42130@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
42131 if (error)
42132 goto out2;
42133
42134+ if (gr_handle_chroot_pivot()) {
42135+ error = -EPERM;
42136+ goto out2;
42137+ }
42138+
42139 get_fs_root(current->fs, &root);
42140 error = lock_mount(&old);
42141 if (error)
42142diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
42143--- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
42144+++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
42145@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
42146 int res, val = 0, len;
42147 __u8 __name[NCP_MAXPATHLEN + 1];
42148
42149+ pax_track_stack();
42150+
42151 if (dentry == dentry->d_sb->s_root)
42152 return 1;
42153
42154@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
42155 int error, res, len;
42156 __u8 __name[NCP_MAXPATHLEN + 1];
42157
42158+ pax_track_stack();
42159+
42160 error = -EIO;
42161 if (!ncp_conn_valid(server))
42162 goto finished;
42163@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
42164 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
42165 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
42166
42167+ pax_track_stack();
42168+
42169 ncp_age_dentry(server, dentry);
42170 len = sizeof(__name);
42171 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
42172@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
42173 int error, len;
42174 __u8 __name[NCP_MAXPATHLEN + 1];
42175
42176+ pax_track_stack();
42177+
42178 DPRINTK("ncp_mkdir: making %s/%s\n",
42179 dentry->d_parent->d_name.name, dentry->d_name.name);
42180
42181@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
42182 int old_len, new_len;
42183 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
42184
42185+ pax_track_stack();
42186+
42187 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
42188 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
42189 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
42190diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
42191--- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42192+++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
42193@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
42194 #endif
42195 struct ncp_entry_info finfo;
42196
42197+ pax_track_stack();
42198+
42199 memset(&data, 0, sizeof(data));
42200 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
42201 if (!server)
42202diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
42203--- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42204+++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
42205@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
42206 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
42207 nfsi->attrtimeo_timestamp = jiffies;
42208
42209- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
42210+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
42211 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
42212 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
42213 else
42214@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
42215 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
42216 }
42217
42218-static atomic_long_t nfs_attr_generation_counter;
42219+static atomic_long_unchecked_t nfs_attr_generation_counter;
42220
42221 static unsigned long nfs_read_attr_generation_counter(void)
42222 {
42223- return atomic_long_read(&nfs_attr_generation_counter);
42224+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
42225 }
42226
42227 unsigned long nfs_inc_attr_generation_counter(void)
42228 {
42229- return atomic_long_inc_return(&nfs_attr_generation_counter);
42230+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
42231 }
42232
42233 void nfs_fattr_init(struct nfs_fattr *fattr)
42234diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
42235--- linux-3.0.4/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
42236+++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
42237@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
42238 unsigned int strhashval;
42239 int err;
42240
42241+ pax_track_stack();
42242+
42243 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
42244 (long long) lock->lk_offset,
42245 (long long) lock->lk_length);
42246diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
42247--- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
42248+++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
42249@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
42250 .dentry = dentry,
42251 };
42252
42253+ pax_track_stack();
42254+
42255 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
42256 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
42257 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
42258diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
42259--- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
42260+++ linux-3.0.4/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
42261@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
42262 } else {
42263 oldfs = get_fs();
42264 set_fs(KERNEL_DS);
42265- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
42266+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
42267 set_fs(oldfs);
42268 }
42269
42270@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
42271
42272 /* Write the data. */
42273 oldfs = get_fs(); set_fs(KERNEL_DS);
42274- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
42275+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
42276 set_fs(oldfs);
42277 if (host_err < 0)
42278 goto out_nfserr;
42279@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
42280 */
42281
42282 oldfs = get_fs(); set_fs(KERNEL_DS);
42283- host_err = inode->i_op->readlink(dentry, buf, *lenp);
42284+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
42285 set_fs(oldfs);
42286
42287 if (host_err < 0)
42288diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
42289--- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
42290+++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
42291@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
42292 goto out_close_fd;
42293
42294 ret = -EFAULT;
42295- if (copy_to_user(buf, &fanotify_event_metadata,
42296+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
42297+ copy_to_user(buf, &fanotify_event_metadata,
42298 fanotify_event_metadata.event_len))
42299 goto out_kill_access_response;
42300
42301diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
42302--- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
42303+++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
42304@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
42305 * get set to 0 so it will never get 'freed'
42306 */
42307 static struct fsnotify_event *q_overflow_event;
42308-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
42309+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
42310
42311 /**
42312 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
42313@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
42314 */
42315 u32 fsnotify_get_cookie(void)
42316 {
42317- return atomic_inc_return(&fsnotify_sync_cookie);
42318+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
42319 }
42320 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
42321
42322diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
42323--- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
42324+++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
42325@@ -1329,7 +1329,7 @@ find_next_index_buffer:
42326 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
42327 ~(s64)(ndir->itype.index.block_size - 1)));
42328 /* Bounds checks. */
42329- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42330+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42331 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
42332 "inode 0x%lx or driver bug.", vdir->i_ino);
42333 goto err_out;
42334diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
42335--- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
42336+++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
42337@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
42338 #endif /* NTFS_RW */
42339 };
42340
42341-const struct file_operations ntfs_empty_file_ops = {};
42342+const struct file_operations ntfs_empty_file_ops __read_only;
42343
42344-const struct inode_operations ntfs_empty_inode_ops = {};
42345+const struct inode_operations ntfs_empty_inode_ops __read_only;
42346diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
42347--- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
42348+++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
42349@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
42350 goto bail;
42351 }
42352
42353- atomic_inc(&osb->alloc_stats.moves);
42354+ atomic_inc_unchecked(&osb->alloc_stats.moves);
42355
42356 bail:
42357 if (handle)
42358diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
42359--- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
42360+++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
42361@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
42362 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
42363 struct ocfs2_dir_lookup_result target_insert = { NULL, };
42364
42365+ pax_track_stack();
42366+
42367 /* At some point it might be nice to break this function up a
42368 * bit. */
42369
42370diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
42371--- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
42372+++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
42373@@ -235,11 +235,11 @@ enum ocfs2_vol_state
42374
42375 struct ocfs2_alloc_stats
42376 {
42377- atomic_t moves;
42378- atomic_t local_data;
42379- atomic_t bitmap_data;
42380- atomic_t bg_allocs;
42381- atomic_t bg_extends;
42382+ atomic_unchecked_t moves;
42383+ atomic_unchecked_t local_data;
42384+ atomic_unchecked_t bitmap_data;
42385+ atomic_unchecked_t bg_allocs;
42386+ atomic_unchecked_t bg_extends;
42387 };
42388
42389 enum ocfs2_local_alloc_state
42390diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
42391--- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
42392+++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
42393@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
42394 mlog_errno(status);
42395 goto bail;
42396 }
42397- atomic_inc(&osb->alloc_stats.bg_extends);
42398+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
42399
42400 /* You should never ask for this much metadata */
42401 BUG_ON(bits_wanted >
42402@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
42403 mlog_errno(status);
42404 goto bail;
42405 }
42406- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42407+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42408
42409 *suballoc_loc = res.sr_bg_blkno;
42410 *suballoc_bit_start = res.sr_bit_offset;
42411@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
42412 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
42413 res->sr_bits);
42414
42415- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42416+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42417
42418 BUG_ON(res->sr_bits != 1);
42419
42420@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
42421 mlog_errno(status);
42422 goto bail;
42423 }
42424- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42425+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42426
42427 BUG_ON(res.sr_bits != 1);
42428
42429@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
42430 cluster_start,
42431 num_clusters);
42432 if (!status)
42433- atomic_inc(&osb->alloc_stats.local_data);
42434+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
42435 } else {
42436 if (min_clusters > (osb->bitmap_cpg - 1)) {
42437 /* The only paths asking for contiguousness
42438@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
42439 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
42440 res.sr_bg_blkno,
42441 res.sr_bit_offset);
42442- atomic_inc(&osb->alloc_stats.bitmap_data);
42443+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
42444 *num_clusters = res.sr_bits;
42445 }
42446 }
42447diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
42448--- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
42449+++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
42450@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
42451 "%10s => GlobalAllocs: %d LocalAllocs: %d "
42452 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
42453 "Stats",
42454- atomic_read(&osb->alloc_stats.bitmap_data),
42455- atomic_read(&osb->alloc_stats.local_data),
42456- atomic_read(&osb->alloc_stats.bg_allocs),
42457- atomic_read(&osb->alloc_stats.moves),
42458- atomic_read(&osb->alloc_stats.bg_extends));
42459+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
42460+ atomic_read_unchecked(&osb->alloc_stats.local_data),
42461+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
42462+ atomic_read_unchecked(&osb->alloc_stats.moves),
42463+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
42464
42465 out += snprintf(buf + out, len - out,
42466 "%10s => State: %u Descriptor: %llu Size: %u bits "
42467@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
42468 spin_lock_init(&osb->osb_xattr_lock);
42469 ocfs2_init_steal_slots(osb);
42470
42471- atomic_set(&osb->alloc_stats.moves, 0);
42472- atomic_set(&osb->alloc_stats.local_data, 0);
42473- atomic_set(&osb->alloc_stats.bitmap_data, 0);
42474- atomic_set(&osb->alloc_stats.bg_allocs, 0);
42475- atomic_set(&osb->alloc_stats.bg_extends, 0);
42476+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
42477+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
42478+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
42479+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
42480+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
42481
42482 /* Copy the blockcheck stats from the superblock probe */
42483 osb->osb_ecc_stats = *stats;
42484diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
42485--- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
42486+++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
42487@@ -142,7 +142,7 @@ bail:
42488
42489 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
42490 {
42491- char *link = nd_get_link(nd);
42492+ const char *link = nd_get_link(nd);
42493 if (!IS_ERR(link))
42494 kfree(link);
42495 }
42496diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
42497--- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
42498+++ linux-3.0.4/fs/open.c 2011-09-14 09:16:46.000000000 -0400
42499@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
42500 error = locks_verify_truncate(inode, NULL, length);
42501 if (!error)
42502 error = security_path_truncate(&path);
42503+
42504+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
42505+ error = -EACCES;
42506+
42507 if (!error)
42508 error = do_truncate(path.dentry, length, 0, NULL);
42509
42510@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
42511 if (__mnt_is_readonly(path.mnt))
42512 res = -EROFS;
42513
42514+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
42515+ res = -EACCES;
42516+
42517 out_path_release:
42518 path_put(&path);
42519 out:
42520@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
42521 if (error)
42522 goto dput_and_out;
42523
42524+ gr_log_chdir(path.dentry, path.mnt);
42525+
42526 set_fs_pwd(current->fs, &path);
42527
42528 dput_and_out:
42529@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
42530 goto out_putf;
42531
42532 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
42533+
42534+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
42535+ error = -EPERM;
42536+
42537+ if (!error)
42538+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
42539+
42540 if (!error)
42541 set_fs_pwd(current->fs, &file->f_path);
42542 out_putf:
42543@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
42544 if (error)
42545 goto dput_and_out;
42546
42547+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42548+ goto dput_and_out;
42549+
42550 set_fs_root(current->fs, &path);
42551+
42552+ gr_handle_chroot_chdir(&path);
42553+
42554 error = 0;
42555 dput_and_out:
42556 path_put(&path);
42557@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42558 err = mnt_want_write_file(file);
42559 if (err)
42560 goto out_putf;
42561+
42562 mutex_lock(&inode->i_mutex);
42563+
42564+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
42565+ err = -EACCES;
42566+ goto out_unlock;
42567+ }
42568+
42569 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
42570 if (err)
42571 goto out_unlock;
42572 if (mode == (mode_t) -1)
42573 mode = inode->i_mode;
42574+
42575+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
42576+ err = -EACCES;
42577+ goto out_unlock;
42578+ }
42579+
42580 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42581 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42582 err = notify_change(dentry, &newattrs);
42583@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42584 error = mnt_want_write(path.mnt);
42585 if (error)
42586 goto dput_and_out;
42587+
42588 mutex_lock(&inode->i_mutex);
42589+
42590+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42591+ error = -EACCES;
42592+ goto out_unlock;
42593+ }
42594+
42595 error = security_path_chmod(path.dentry, path.mnt, mode);
42596 if (error)
42597 goto out_unlock;
42598 if (mode == (mode_t) -1)
42599 mode = inode->i_mode;
42600+
42601+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42602+ error = -EACCES;
42603+ goto out_unlock;
42604+ }
42605+
42606 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42607 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42608 error = notify_change(path.dentry, &newattrs);
42609@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
42610 int error;
42611 struct iattr newattrs;
42612
42613+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
42614+ return -EACCES;
42615+
42616 newattrs.ia_valid = ATTR_CTIME;
42617 if (user != (uid_t) -1) {
42618 newattrs.ia_valid |= ATTR_UID;
42619@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
42620 if (!IS_ERR(tmp)) {
42621 fd = get_unused_fd_flags(flags);
42622 if (fd >= 0) {
42623- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
42624+ struct file *f;
42625+ /* don't allow to be set by userland */
42626+ flags &= ~FMODE_GREXEC;
42627+ f = do_filp_open(dfd, tmp, &op, lookup);
42628 if (IS_ERR(f)) {
42629 put_unused_fd(fd);
42630 fd = PTR_ERR(f);
42631diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
42632--- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
42633+++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
42634@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42635 ldm_error ("A VBLK claims to have %d parts.", num);
42636 return false;
42637 }
42638+
42639 if (rec >= num) {
42640 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42641 return false;
42642@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42643 goto found;
42644 }
42645
42646- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42647+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42648 if (!f) {
42649 ldm_crit ("Out of memory.");
42650 return false;
42651diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
42652--- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
42653+++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
42654@@ -420,9 +420,9 @@ redo:
42655 }
42656 if (bufs) /* More to do? */
42657 continue;
42658- if (!pipe->writers)
42659+ if (!atomic_read(&pipe->writers))
42660 break;
42661- if (!pipe->waiting_writers) {
42662+ if (!atomic_read(&pipe->waiting_writers)) {
42663 /* syscall merging: Usually we must not sleep
42664 * if O_NONBLOCK is set, or if we got some data.
42665 * But if a writer sleeps in kernel space, then
42666@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
42667 mutex_lock(&inode->i_mutex);
42668 pipe = inode->i_pipe;
42669
42670- if (!pipe->readers) {
42671+ if (!atomic_read(&pipe->readers)) {
42672 send_sig(SIGPIPE, current, 0);
42673 ret = -EPIPE;
42674 goto out;
42675@@ -530,7 +530,7 @@ redo1:
42676 for (;;) {
42677 int bufs;
42678
42679- if (!pipe->readers) {
42680+ if (!atomic_read(&pipe->readers)) {
42681 send_sig(SIGPIPE, current, 0);
42682 if (!ret)
42683 ret = -EPIPE;
42684@@ -616,9 +616,9 @@ redo2:
42685 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42686 do_wakeup = 0;
42687 }
42688- pipe->waiting_writers++;
42689+ atomic_inc(&pipe->waiting_writers);
42690 pipe_wait(pipe);
42691- pipe->waiting_writers--;
42692+ atomic_dec(&pipe->waiting_writers);
42693 }
42694 out:
42695 mutex_unlock(&inode->i_mutex);
42696@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
42697 mask = 0;
42698 if (filp->f_mode & FMODE_READ) {
42699 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42700- if (!pipe->writers && filp->f_version != pipe->w_counter)
42701+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42702 mask |= POLLHUP;
42703 }
42704
42705@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
42706 * Most Unices do not set POLLERR for FIFOs but on Linux they
42707 * behave exactly like pipes for poll().
42708 */
42709- if (!pipe->readers)
42710+ if (!atomic_read(&pipe->readers))
42711 mask |= POLLERR;
42712 }
42713
42714@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
42715
42716 mutex_lock(&inode->i_mutex);
42717 pipe = inode->i_pipe;
42718- pipe->readers -= decr;
42719- pipe->writers -= decw;
42720+ atomic_sub(decr, &pipe->readers);
42721+ atomic_sub(decw, &pipe->writers);
42722
42723- if (!pipe->readers && !pipe->writers) {
42724+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42725 free_pipe_info(inode);
42726 } else {
42727 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
42728@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
42729
42730 if (inode->i_pipe) {
42731 ret = 0;
42732- inode->i_pipe->readers++;
42733+ atomic_inc(&inode->i_pipe->readers);
42734 }
42735
42736 mutex_unlock(&inode->i_mutex);
42737@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
42738
42739 if (inode->i_pipe) {
42740 ret = 0;
42741- inode->i_pipe->writers++;
42742+ atomic_inc(&inode->i_pipe->writers);
42743 }
42744
42745 mutex_unlock(&inode->i_mutex);
42746@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
42747 if (inode->i_pipe) {
42748 ret = 0;
42749 if (filp->f_mode & FMODE_READ)
42750- inode->i_pipe->readers++;
42751+ atomic_inc(&inode->i_pipe->readers);
42752 if (filp->f_mode & FMODE_WRITE)
42753- inode->i_pipe->writers++;
42754+ atomic_inc(&inode->i_pipe->writers);
42755 }
42756
42757 mutex_unlock(&inode->i_mutex);
42758@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
42759 inode->i_pipe = NULL;
42760 }
42761
42762-static struct vfsmount *pipe_mnt __read_mostly;
42763+struct vfsmount *pipe_mnt __read_mostly;
42764
42765 /*
42766 * pipefs_dname() is called from d_path().
42767@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
42768 goto fail_iput;
42769 inode->i_pipe = pipe;
42770
42771- pipe->readers = pipe->writers = 1;
42772+ atomic_set(&pipe->readers, 1);
42773+ atomic_set(&pipe->writers, 1);
42774 inode->i_fop = &rdwr_pipefifo_fops;
42775
42776 /*
42777diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
42778--- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
42779+++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
42780@@ -60,6 +60,7 @@
42781 #include <linux/tty.h>
42782 #include <linux/string.h>
42783 #include <linux/mman.h>
42784+#include <linux/grsecurity.h>
42785 #include <linux/proc_fs.h>
42786 #include <linux/ioport.h>
42787 #include <linux/uaccess.h>
42788@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
42789 seq_putc(m, '\n');
42790 }
42791
42792+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42793+static inline void task_pax(struct seq_file *m, struct task_struct *p)
42794+{
42795+ if (p->mm)
42796+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42797+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42798+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42799+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42800+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42801+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42802+ else
42803+ seq_printf(m, "PaX:\t-----\n");
42804+}
42805+#endif
42806+
42807 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42808 struct pid *pid, struct task_struct *task)
42809 {
42810@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
42811 task_cpus_allowed(m, task);
42812 cpuset_task_status_allowed(m, task);
42813 task_context_switch_counts(m, task);
42814+
42815+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42816+ task_pax(m, task);
42817+#endif
42818+
42819+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42820+ task_grsec_rbac(m, task);
42821+#endif
42822+
42823 return 0;
42824 }
42825
42826+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42827+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42828+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42829+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42830+#endif
42831+
42832 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42833 struct pid *pid, struct task_struct *task, int whole)
42834 {
42835@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
42836 cputime_t cutime, cstime, utime, stime;
42837 cputime_t cgtime, gtime;
42838 unsigned long rsslim = 0;
42839- char tcomm[sizeof(task->comm)];
42840+ char tcomm[sizeof(task->comm)] = { 0 };
42841 unsigned long flags;
42842
42843+ pax_track_stack();
42844+
42845 state = *get_task_state(task);
42846 vsize = eip = esp = 0;
42847 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42848@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
42849 gtime = task->gtime;
42850 }
42851
42852+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42853+ if (PAX_RAND_FLAGS(mm)) {
42854+ eip = 0;
42855+ esp = 0;
42856+ wchan = 0;
42857+ }
42858+#endif
42859+#ifdef CONFIG_GRKERNSEC_HIDESYM
42860+ wchan = 0;
42861+ eip =0;
42862+ esp =0;
42863+#endif
42864+
42865 /* scale priority and nice values from timeslices to -20..20 */
42866 /* to make it look like a "normal" Unix priority/nice value */
42867 priority = task_prio(task);
42868@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
42869 vsize,
42870 mm ? get_mm_rss(mm) : 0,
42871 rsslim,
42872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42873+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42874+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42875+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42876+#else
42877 mm ? (permitted ? mm->start_code : 1) : 0,
42878 mm ? (permitted ? mm->end_code : 1) : 0,
42879 (permitted && mm) ? mm->start_stack : 0,
42880+#endif
42881 esp,
42882 eip,
42883 /* The signal information here is obsolete.
42884@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
42885
42886 return 0;
42887 }
42888+
42889+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42890+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42891+{
42892+ u32 curr_ip = 0;
42893+ unsigned long flags;
42894+
42895+ if (lock_task_sighand(task, &flags)) {
42896+ curr_ip = task->signal->curr_ip;
42897+ unlock_task_sighand(task, &flags);
42898+ }
42899+
42900+ return sprintf(buffer, "%pI4\n", &curr_ip);
42901+}
42902+#endif
42903diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
42904--- linux-3.0.4/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
42905+++ linux-3.0.4/fs/proc/base.c 2011-09-13 14:50:28.000000000 -0400
42906@@ -107,6 +107,22 @@ struct pid_entry {
42907 union proc_op op;
42908 };
42909
42910+struct getdents_callback {
42911+ struct linux_dirent __user * current_dir;
42912+ struct linux_dirent __user * previous;
42913+ struct file * file;
42914+ int count;
42915+ int error;
42916+};
42917+
42918+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42919+ loff_t offset, u64 ino, unsigned int d_type)
42920+{
42921+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
42922+ buf->error = -EINVAL;
42923+ return 0;
42924+}
42925+
42926 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42927 .name = (NAME), \
42928 .len = sizeof(NAME) - 1, \
42929@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
42930 if (task == current)
42931 return mm;
42932
42933+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42934+ return ERR_PTR(-EPERM);
42935+
42936 /*
42937 * If current is actively ptrace'ing, and would also be
42938 * permitted to freshly attach with ptrace now, permit it.
42939@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
42940 if (!mm->arg_end)
42941 goto out_mm; /* Shh! No looking before we're done */
42942
42943+ if (gr_acl_handle_procpidmem(task))
42944+ goto out_mm;
42945+
42946 len = mm->arg_end - mm->arg_start;
42947
42948 if (len > PAGE_SIZE)
42949@@ -309,12 +331,28 @@ out:
42950 return res;
42951 }
42952
42953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42954+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42955+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42956+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42957+#endif
42958+
42959 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42960 {
42961 struct mm_struct *mm = mm_for_maps(task);
42962 int res = PTR_ERR(mm);
42963 if (mm && !IS_ERR(mm)) {
42964 unsigned int nwords = 0;
42965+
42966+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42967+ /* allow if we're currently ptracing this task */
42968+ if (PAX_RAND_FLAGS(mm) &&
42969+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42970+ mmput(mm);
42971+ return 0;
42972+ }
42973+#endif
42974+
42975 do {
42976 nwords += 2;
42977 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42978@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
42979 }
42980
42981
42982-#ifdef CONFIG_KALLSYMS
42983+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42984 /*
42985 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42986 * Returns the resolved symbol. If that fails, simply return the address.
42987@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
42988 mutex_unlock(&task->signal->cred_guard_mutex);
42989 }
42990
42991-#ifdef CONFIG_STACKTRACE
42992+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42993
42994 #define MAX_STACK_TRACE_DEPTH 64
42995
42996@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
42997 return count;
42998 }
42999
43000-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43001+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43002 static int proc_pid_syscall(struct task_struct *task, char *buffer)
43003 {
43004 long nr;
43005@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
43006 /************************************************************************/
43007
43008 /* permission checks */
43009-static int proc_fd_access_allowed(struct inode *inode)
43010+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
43011 {
43012 struct task_struct *task;
43013 int allowed = 0;
43014@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
43015 */
43016 task = get_proc_task(inode);
43017 if (task) {
43018- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
43019+ if (log)
43020+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
43021+ else
43022+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
43023 put_task_struct(task);
43024 }
43025 return allowed;
43026@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
43027 if (!task)
43028 goto out_no_task;
43029
43030+ if (gr_acl_handle_procpidmem(task))
43031+ goto out;
43032+
43033 ret = -ENOMEM;
43034 page = (char *)__get_free_page(GFP_TEMPORARY);
43035 if (!page)
43036@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
43037 path_put(&nd->path);
43038
43039 /* Are we allowed to snoop on the tasks file descriptors? */
43040- if (!proc_fd_access_allowed(inode))
43041+ if (!proc_fd_access_allowed(inode,0))
43042 goto out;
43043
43044 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
43045@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
43046 struct path path;
43047
43048 /* Are we allowed to snoop on the tasks file descriptors? */
43049- if (!proc_fd_access_allowed(inode))
43050- goto out;
43051+ /* logging this is needed for learning on chromium to work properly,
43052+ but we don't want to flood the logs from 'ps' which does a readlink
43053+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
43054+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
43055+ */
43056+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
43057+ if (!proc_fd_access_allowed(inode,0))
43058+ goto out;
43059+ } else {
43060+ if (!proc_fd_access_allowed(inode,1))
43061+ goto out;
43062+ }
43063
43064 error = PROC_I(inode)->op.proc_get_link(inode, &path);
43065 if (error)
43066@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
43067 rcu_read_lock();
43068 cred = __task_cred(task);
43069 inode->i_uid = cred->euid;
43070+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43071+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43072+#else
43073 inode->i_gid = cred->egid;
43074+#endif
43075 rcu_read_unlock();
43076 }
43077 security_task_to_inode(task, inode);
43078@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
43079 struct inode *inode = dentry->d_inode;
43080 struct task_struct *task;
43081 const struct cred *cred;
43082+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43083+ const struct cred *tmpcred = current_cred();
43084+#endif
43085
43086 generic_fillattr(inode, stat);
43087
43088@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
43089 stat->uid = 0;
43090 stat->gid = 0;
43091 task = pid_task(proc_pid(inode), PIDTYPE_PID);
43092+
43093+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
43094+ rcu_read_unlock();
43095+ return -ENOENT;
43096+ }
43097+
43098 if (task) {
43099+ cred = __task_cred(task);
43100+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43101+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
43102+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43103+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43104+#endif
43105+ ) {
43106+#endif
43107 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
43108+#ifdef CONFIG_GRKERNSEC_PROC_USER
43109+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
43110+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43111+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
43112+#endif
43113 task_dumpable(task)) {
43114- cred = __task_cred(task);
43115 stat->uid = cred->euid;
43116+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43117+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
43118+#else
43119 stat->gid = cred->egid;
43120+#endif
43121 }
43122+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43123+ } else {
43124+ rcu_read_unlock();
43125+ return -ENOENT;
43126+ }
43127+#endif
43128 }
43129 rcu_read_unlock();
43130 return 0;
43131@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
43132
43133 if (task) {
43134 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
43135+#ifdef CONFIG_GRKERNSEC_PROC_USER
43136+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
43137+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43138+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
43139+#endif
43140 task_dumpable(task)) {
43141 rcu_read_lock();
43142 cred = __task_cred(task);
43143 inode->i_uid = cred->euid;
43144+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43145+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43146+#else
43147 inode->i_gid = cred->egid;
43148+#endif
43149 rcu_read_unlock();
43150 } else {
43151 inode->i_uid = 0;
43152@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
43153 int fd = proc_fd(inode);
43154
43155 if (task) {
43156- files = get_files_struct(task);
43157+ if (!gr_acl_handle_procpidmem(task))
43158+ files = get_files_struct(task);
43159 put_task_struct(task);
43160 }
43161 if (files) {
43162@@ -2169,11 +2268,21 @@ static const struct file_operations proc
43163 */
43164 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
43165 {
43166+ struct task_struct *task;
43167 int rv = generic_permission(inode, mask, flags, NULL);
43168- if (rv == 0)
43169- return 0;
43170+
43171 if (task_pid(current) == proc_pid(inode))
43172 rv = 0;
43173+
43174+ task = get_proc_task(inode);
43175+ if (task == NULL)
43176+ return rv;
43177+
43178+ if (gr_acl_handle_procpidmem(task))
43179+ rv = -EACCES;
43180+
43181+ put_task_struct(task);
43182+
43183 return rv;
43184 }
43185
43186@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
43187 if (!task)
43188 goto out_no_task;
43189
43190+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43191+ goto out;
43192+
43193 /*
43194 * Yes, it does not scale. And it should not. Don't add
43195 * new entries into /proc/<tgid>/ without very good reasons.
43196@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
43197 if (!task)
43198 goto out_no_task;
43199
43200+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43201+ goto out;
43202+
43203 ret = 0;
43204 i = filp->f_pos;
43205 switch (i) {
43206@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
43207 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
43208 void *cookie)
43209 {
43210- char *s = nd_get_link(nd);
43211+ const char *s = nd_get_link(nd);
43212 if (!IS_ERR(s))
43213 __putname(s);
43214 }
43215@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
43216 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
43217 #endif
43218 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
43219-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43220+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43221 INF("syscall", S_IRUGO, proc_pid_syscall),
43222 #endif
43223 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43224@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
43225 #ifdef CONFIG_SECURITY
43226 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43227 #endif
43228-#ifdef CONFIG_KALLSYMS
43229+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43230 INF("wchan", S_IRUGO, proc_pid_wchan),
43231 #endif
43232-#ifdef CONFIG_STACKTRACE
43233+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43234 ONE("stack", S_IRUGO, proc_pid_stack),
43235 #endif
43236 #ifdef CONFIG_SCHEDSTATS
43237@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
43238 #ifdef CONFIG_HARDWALL
43239 INF("hardwall", S_IRUGO, proc_pid_hardwall),
43240 #endif
43241+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43242+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
43243+#endif
43244 };
43245
43246 static int proc_tgid_base_readdir(struct file * filp,
43247@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
43248 if (!inode)
43249 goto out;
43250
43251+#ifdef CONFIG_GRKERNSEC_PROC_USER
43252+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
43253+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43254+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43255+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
43256+#else
43257 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
43258+#endif
43259 inode->i_op = &proc_tgid_base_inode_operations;
43260 inode->i_fop = &proc_tgid_base_operations;
43261 inode->i_flags|=S_IMMUTABLE;
43262@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
43263 if (!task)
43264 goto out;
43265
43266+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43267+ goto out_put_task;
43268+
43269 result = proc_pid_instantiate(dir, dentry, task, NULL);
43270+out_put_task:
43271 put_task_struct(task);
43272 out:
43273 return result;
43274@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
43275 {
43276 unsigned int nr;
43277 struct task_struct *reaper;
43278+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43279+ const struct cred *tmpcred = current_cred();
43280+ const struct cred *itercred;
43281+#endif
43282+ filldir_t __filldir = filldir;
43283 struct tgid_iter iter;
43284 struct pid_namespace *ns;
43285
43286@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
43287 for (iter = next_tgid(ns, iter);
43288 iter.task;
43289 iter.tgid += 1, iter = next_tgid(ns, iter)) {
43290+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43291+ rcu_read_lock();
43292+ itercred = __task_cred(iter.task);
43293+#endif
43294+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
43295+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43296+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
43297+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43298+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43299+#endif
43300+ )
43301+#endif
43302+ )
43303+ __filldir = &gr_fake_filldir;
43304+ else
43305+ __filldir = filldir;
43306+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43307+ rcu_read_unlock();
43308+#endif
43309 filp->f_pos = iter.tgid + TGID_OFFSET;
43310- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
43311+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
43312 put_task_struct(iter.task);
43313 goto out;
43314 }
43315@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
43316 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
43317 #endif
43318 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
43319-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43320+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43321 INF("syscall", S_IRUGO, proc_pid_syscall),
43322 #endif
43323 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43324@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
43325 #ifdef CONFIG_SECURITY
43326 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43327 #endif
43328-#ifdef CONFIG_KALLSYMS
43329+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43330 INF("wchan", S_IRUGO, proc_pid_wchan),
43331 #endif
43332-#ifdef CONFIG_STACKTRACE
43333+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43334 ONE("stack", S_IRUGO, proc_pid_stack),
43335 #endif
43336 #ifdef CONFIG_SCHEDSTATS
43337diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
43338--- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
43339+++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
43340@@ -23,7 +23,11 @@ static const struct file_operations cmdl
43341
43342 static int __init proc_cmdline_init(void)
43343 {
43344+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43345+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
43346+#else
43347 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
43348+#endif
43349 return 0;
43350 }
43351 module_init(proc_cmdline_init);
43352diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
43353--- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
43354+++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
43355@@ -64,7 +64,11 @@ static const struct file_operations proc
43356
43357 static int __init proc_devices_init(void)
43358 {
43359+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43360+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
43361+#else
43362 proc_create("devices", 0, NULL, &proc_devinfo_operations);
43363+#endif
43364 return 0;
43365 }
43366 module_init(proc_devices_init);
43367diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
43368--- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
43369+++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
43370@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
43371 if (de->mode) {
43372 inode->i_mode = de->mode;
43373 inode->i_uid = de->uid;
43374+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43375+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43376+#else
43377 inode->i_gid = de->gid;
43378+#endif
43379 }
43380 if (de->size)
43381 inode->i_size = de->size;
43382diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
43383--- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
43384+++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
43385@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
43386 struct pid *pid, struct task_struct *task);
43387 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
43388 struct pid *pid, struct task_struct *task);
43389+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43390+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
43391+#endif
43392 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
43393
43394 extern const struct file_operations proc_maps_operations;
43395diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
43396--- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
43397+++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
43398@@ -30,12 +30,12 @@ config PROC_FS
43399
43400 config PROC_KCORE
43401 bool "/proc/kcore support" if !ARM
43402- depends on PROC_FS && MMU
43403+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
43404
43405 config PROC_VMCORE
43406 bool "/proc/vmcore support"
43407- depends on PROC_FS && CRASH_DUMP
43408- default y
43409+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
43410+ default n
43411 help
43412 Exports the dump image of crashed kernel in ELF format.
43413
43414@@ -59,8 +59,8 @@ config PROC_SYSCTL
43415 limited in memory.
43416
43417 config PROC_PAGE_MONITOR
43418- default y
43419- depends on PROC_FS && MMU
43420+ default n
43421+ depends on PROC_FS && MMU && !GRKERNSEC
43422 bool "Enable /proc page monitoring" if EXPERT
43423 help
43424 Various /proc files exist to monitor process memory utilization:
43425diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
43426--- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
43427+++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
43428@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
43429 off_t offset = 0;
43430 struct kcore_list *m;
43431
43432+ pax_track_stack();
43433+
43434 /* setup ELF header */
43435 elf = (struct elfhdr *) bufp;
43436 bufp += sizeof(struct elfhdr);
43437@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
43438 * the addresses in the elf_phdr on our list.
43439 */
43440 start = kc_offset_to_vaddr(*fpos - elf_buflen);
43441- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
43442+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
43443+ if (tsz > buflen)
43444 tsz = buflen;
43445-
43446+
43447 while (buflen) {
43448 struct kcore_list *m;
43449
43450@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
43451 kfree(elf_buf);
43452 } else {
43453 if (kern_addr_valid(start)) {
43454- unsigned long n;
43455+ char *elf_buf;
43456+ mm_segment_t oldfs;
43457
43458- n = copy_to_user(buffer, (char *)start, tsz);
43459- /*
43460- * We cannot distingush between fault on source
43461- * and fault on destination. When this happens
43462- * we clear too and hope it will trigger the
43463- * EFAULT again.
43464- */
43465- if (n) {
43466- if (clear_user(buffer + tsz - n,
43467- n))
43468+ elf_buf = kmalloc(tsz, GFP_KERNEL);
43469+ if (!elf_buf)
43470+ return -ENOMEM;
43471+ oldfs = get_fs();
43472+ set_fs(KERNEL_DS);
43473+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
43474+ set_fs(oldfs);
43475+ if (copy_to_user(buffer, elf_buf, tsz)) {
43476+ kfree(elf_buf);
43477 return -EFAULT;
43478+ }
43479 }
43480+ set_fs(oldfs);
43481+ kfree(elf_buf);
43482 } else {
43483 if (clear_user(buffer, tsz))
43484 return -EFAULT;
43485@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
43486
43487 static int open_kcore(struct inode *inode, struct file *filp)
43488 {
43489+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
43490+ return -EPERM;
43491+#endif
43492 if (!capable(CAP_SYS_RAWIO))
43493 return -EPERM;
43494 if (kcore_need_update)
43495diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
43496--- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
43497+++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
43498@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
43499 unsigned long pages[NR_LRU_LISTS];
43500 int lru;
43501
43502+ pax_track_stack();
43503+
43504 /*
43505 * display in kilobytes.
43506 */
43507@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
43508 vmi.used >> 10,
43509 vmi.largest_chunk >> 10
43510 #ifdef CONFIG_MEMORY_FAILURE
43511- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
43512+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
43513 #endif
43514 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
43515 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
43516diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
43517--- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
43518+++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
43519@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
43520 if (len < 1)
43521 len = 1;
43522 seq_printf(m, "%*c", len, ' ');
43523- seq_path(m, &file->f_path, "");
43524+ seq_path(m, &file->f_path, "\n\\");
43525 }
43526
43527 seq_putc(m, '\n');
43528diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
43529--- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
43530+++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
43531@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
43532 struct task_struct *task;
43533 struct nsproxy *ns;
43534 struct net *net = NULL;
43535+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43536+ const struct cred *cred = current_cred();
43537+#endif
43538+
43539+#ifdef CONFIG_GRKERNSEC_PROC_USER
43540+ if (cred->fsuid)
43541+ return net;
43542+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43543+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43544+ return net;
43545+#endif
43546
43547 rcu_read_lock();
43548 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43549diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
43550--- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
43551+++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
43552@@ -8,6 +8,8 @@
43553 #include <linux/namei.h>
43554 #include "internal.h"
43555
43556+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43557+
43558 static const struct dentry_operations proc_sys_dentry_operations;
43559 static const struct file_operations proc_sys_file_operations;
43560 static const struct inode_operations proc_sys_inode_operations;
43561@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
43562 if (!p)
43563 goto out;
43564
43565+ if (gr_handle_sysctl(p, MAY_EXEC))
43566+ goto out;
43567+
43568 err = ERR_PTR(-ENOMEM);
43569 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43570 if (h)
43571@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
43572 if (*pos < file->f_pos)
43573 continue;
43574
43575+ if (gr_handle_sysctl(table, 0))
43576+ continue;
43577+
43578 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43579 if (res)
43580 return res;
43581@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
43582 if (IS_ERR(head))
43583 return PTR_ERR(head);
43584
43585+ if (table && gr_handle_sysctl(table, MAY_EXEC))
43586+ return -ENOENT;
43587+
43588 generic_fillattr(inode, stat);
43589 if (table)
43590 stat->mode = (stat->mode & S_IFMT) | table->mode;
43591diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
43592--- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
43593+++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
43594@@ -123,7 +123,15 @@ void __init proc_root_init(void)
43595 #ifdef CONFIG_PROC_DEVICETREE
43596 proc_device_tree_init();
43597 #endif
43598+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43599+#ifdef CONFIG_GRKERNSEC_PROC_USER
43600+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43601+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43602+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43603+#endif
43604+#else
43605 proc_mkdir("bus", NULL);
43606+#endif
43607 proc_sys_init();
43608 }
43609
43610diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
43611--- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
43612+++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
43613@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
43614 "VmExe:\t%8lu kB\n"
43615 "VmLib:\t%8lu kB\n"
43616 "VmPTE:\t%8lu kB\n"
43617- "VmSwap:\t%8lu kB\n",
43618- hiwater_vm << (PAGE_SHIFT-10),
43619+ "VmSwap:\t%8lu kB\n"
43620+
43621+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43622+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43623+#endif
43624+
43625+ ,hiwater_vm << (PAGE_SHIFT-10),
43626 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43627 mm->locked_vm << (PAGE_SHIFT-10),
43628 hiwater_rss << (PAGE_SHIFT-10),
43629@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
43630 data << (PAGE_SHIFT-10),
43631 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43632 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
43633- swap << (PAGE_SHIFT-10));
43634+ swap << (PAGE_SHIFT-10)
43635+
43636+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43637+ , mm->context.user_cs_base, mm->context.user_cs_limit
43638+#endif
43639+
43640+ );
43641 }
43642
43643 unsigned long task_vsize(struct mm_struct *mm)
43644@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
43645 return ret;
43646 }
43647
43648+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43649+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43650+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43651+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43652+#endif
43653+
43654 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43655 {
43656 struct mm_struct *mm = vma->vm_mm;
43657@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
43658 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43659 }
43660
43661- /* We don't show the stack guard page in /proc/maps */
43662+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43663+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
43664+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
43665+#else
43666 start = vma->vm_start;
43667- if (stack_guard_page_start(vma, start))
43668- start += PAGE_SIZE;
43669 end = vma->vm_end;
43670- if (stack_guard_page_end(vma, end))
43671- end -= PAGE_SIZE;
43672+#endif
43673
43674 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43675 start,
43676@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
43677 flags & VM_WRITE ? 'w' : '-',
43678 flags & VM_EXEC ? 'x' : '-',
43679 flags & VM_MAYSHARE ? 's' : 'p',
43680+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43681+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43682+#else
43683 pgoff,
43684+#endif
43685 MAJOR(dev), MINOR(dev), ino, &len);
43686
43687 /*
43688@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
43689 */
43690 if (file) {
43691 pad_len_spaces(m, len);
43692- seq_path(m, &file->f_path, "\n");
43693+ seq_path(m, &file->f_path, "\n\\");
43694 } else {
43695 const char *name = arch_vma_name(vma);
43696 if (!name) {
43697@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
43698 if (vma->vm_start <= mm->brk &&
43699 vma->vm_end >= mm->start_brk) {
43700 name = "[heap]";
43701- } else if (vma->vm_start <= mm->start_stack &&
43702- vma->vm_end >= mm->start_stack) {
43703+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43704+ (vma->vm_start <= mm->start_stack &&
43705+ vma->vm_end >= mm->start_stack)) {
43706 name = "[stack]";
43707 }
43708 } else {
43709@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
43710 };
43711
43712 memset(&mss, 0, sizeof mss);
43713- mss.vma = vma;
43714- /* mmap_sem is held in m_start */
43715- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43716- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43717-
43718+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43719+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43720+#endif
43721+ mss.vma = vma;
43722+ /* mmap_sem is held in m_start */
43723+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43724+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43725+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43726+ }
43727+#endif
43728 show_map_vma(m, vma);
43729
43730 seq_printf(m,
43731@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
43732 "KernelPageSize: %8lu kB\n"
43733 "MMUPageSize: %8lu kB\n"
43734 "Locked: %8lu kB\n",
43735+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43736+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43737+#else
43738 (vma->vm_end - vma->vm_start) >> 10,
43739+#endif
43740 mss.resident >> 10,
43741 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43742 mss.shared_clean >> 10,
43743@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
43744
43745 if (file) {
43746 seq_printf(m, " file=");
43747- seq_path(m, &file->f_path, "\n\t= ");
43748+ seq_path(m, &file->f_path, "\n\t\\= ");
43749 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
43750 seq_printf(m, " heap");
43751 } else if (vma->vm_start <= mm->start_stack &&
43752diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
43753--- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
43754+++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
43755@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
43756 else
43757 bytes += kobjsize(mm);
43758
43759- if (current->fs && current->fs->users > 1)
43760+ if (current->fs && atomic_read(&current->fs->users) > 1)
43761 sbytes += kobjsize(current->fs);
43762 else
43763 bytes += kobjsize(current->fs);
43764@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
43765
43766 if (file) {
43767 pad_len_spaces(m, len);
43768- seq_path(m, &file->f_path, "");
43769+ seq_path(m, &file->f_path, "\n\\");
43770 } else if (mm) {
43771 if (vma->vm_start <= mm->start_stack &&
43772 vma->vm_end >= mm->start_stack) {
43773diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
43774--- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
43775+++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
43776@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
43777 void quota_send_warning(short type, unsigned int id, dev_t dev,
43778 const char warntype)
43779 {
43780- static atomic_t seq;
43781+ static atomic_unchecked_t seq;
43782 struct sk_buff *skb;
43783 void *msg_head;
43784 int ret;
43785@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
43786 "VFS: Not enough memory to send quota warning.\n");
43787 return;
43788 }
43789- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
43790+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
43791 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
43792 if (!msg_head) {
43793 printk(KERN_ERR
43794diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
43795--- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
43796+++ linux-3.0.4/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
43797@@ -17,6 +17,7 @@
43798 #include <linux/security.h>
43799 #include <linux/syscalls.h>
43800 #include <linux/unistd.h>
43801+#include <linux/namei.h>
43802
43803 #include <asm/uaccess.h>
43804
43805@@ -67,6 +68,7 @@ struct old_linux_dirent {
43806
43807 struct readdir_callback {
43808 struct old_linux_dirent __user * dirent;
43809+ struct file * file;
43810 int result;
43811 };
43812
43813@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43814 buf->result = -EOVERFLOW;
43815 return -EOVERFLOW;
43816 }
43817+
43818+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43819+ return 0;
43820+
43821 buf->result++;
43822 dirent = buf->dirent;
43823 if (!access_ok(VERIFY_WRITE, dirent,
43824@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43825
43826 buf.result = 0;
43827 buf.dirent = dirent;
43828+ buf.file = file;
43829
43830 error = vfs_readdir(file, fillonedir, &buf);
43831 if (buf.result)
43832@@ -142,6 +149,7 @@ struct linux_dirent {
43833 struct getdents_callback {
43834 struct linux_dirent __user * current_dir;
43835 struct linux_dirent __user * previous;
43836+ struct file * file;
43837 int count;
43838 int error;
43839 };
43840@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
43841 buf->error = -EOVERFLOW;
43842 return -EOVERFLOW;
43843 }
43844+
43845+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43846+ return 0;
43847+
43848 dirent = buf->previous;
43849 if (dirent) {
43850 if (__put_user(offset, &dirent->d_off))
43851@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43852 buf.previous = NULL;
43853 buf.count = count;
43854 buf.error = 0;
43855+ buf.file = file;
43856
43857 error = vfs_readdir(file, filldir, &buf);
43858 if (error >= 0)
43859@@ -229,6 +242,7 @@ out:
43860 struct getdents_callback64 {
43861 struct linux_dirent64 __user * current_dir;
43862 struct linux_dirent64 __user * previous;
43863+ struct file *file;
43864 int count;
43865 int error;
43866 };
43867@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
43868 buf->error = -EINVAL; /* only used if we fail.. */
43869 if (reclen > buf->count)
43870 return -EINVAL;
43871+
43872+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43873+ return 0;
43874+
43875 dirent = buf->previous;
43876 if (dirent) {
43877 if (__put_user(offset, &dirent->d_off))
43878@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43879
43880 buf.current_dir = dirent;
43881 buf.previous = NULL;
43882+ buf.file = file;
43883 buf.count = count;
43884 buf.error = 0;
43885
43886diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
43887--- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43888+++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
43889@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43890 struct reiserfs_dir_entry de;
43891 int ret = 0;
43892
43893+ pax_track_stack();
43894+
43895 reiserfs_write_lock(inode->i_sb);
43896
43897 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43898diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
43899--- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
43900+++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
43901@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
43902 return;
43903 }
43904
43905- atomic_inc(&(fs_generation(tb->tb_sb)));
43906+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43907 do_balance_starts(tb);
43908
43909 /* balance leaf returns 0 except if combining L R and S into
43910diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
43911--- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
43912+++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
43913@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
43914 struct buffer_head *bh;
43915 int i, j;
43916
43917+ pax_track_stack();
43918+
43919 bh = __getblk(dev, block, bufsize);
43920 if (buffer_uptodate(bh))
43921 return (bh);
43922diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
43923--- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
43924+++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
43925@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
43926 unsigned long savelink = 1;
43927 struct timespec ctime;
43928
43929+ pax_track_stack();
43930+
43931 /* three balancings: (1) old name removal, (2) new name insertion
43932 and (3) maybe "save" link insertion
43933 stat data updates: (1) old directory,
43934diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
43935--- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
43936+++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
43937@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
43938 "SMALL_TAILS " : "NO_TAILS ",
43939 replay_only(sb) ? "REPLAY_ONLY " : "",
43940 convert_reiserfs(sb) ? "CONV " : "",
43941- atomic_read(&r->s_generation_counter),
43942+ atomic_read_unchecked(&r->s_generation_counter),
43943 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43944 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43945 SF(s_good_search_by_key_reada), SF(s_bmaps),
43946@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
43947 struct journal_params *jp = &rs->s_v1.s_journal;
43948 char b[BDEVNAME_SIZE];
43949
43950+ pax_track_stack();
43951+
43952 seq_printf(m, /* on-disk fields */
43953 "jp_journal_1st_block: \t%i\n"
43954 "jp_journal_dev: \t%s[%x]\n"
43955diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
43956--- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
43957+++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
43958@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
43959 int iter = 0;
43960 #endif
43961
43962+ pax_track_stack();
43963+
43964 BUG_ON(!th->t_trans_id);
43965
43966 init_tb_struct(th, &s_del_balance, sb, path,
43967@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
43968 int retval;
43969 int quota_cut_bytes = 0;
43970
43971+ pax_track_stack();
43972+
43973 BUG_ON(!th->t_trans_id);
43974
43975 le_key2cpu_key(&cpu_key, key);
43976@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
43977 int quota_cut_bytes;
43978 loff_t tail_pos = 0;
43979
43980+ pax_track_stack();
43981+
43982 BUG_ON(!th->t_trans_id);
43983
43984 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43985@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
43986 int retval;
43987 int fs_gen;
43988
43989+ pax_track_stack();
43990+
43991 BUG_ON(!th->t_trans_id);
43992
43993 fs_gen = get_generation(inode->i_sb);
43994@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
43995 int fs_gen = 0;
43996 int quota_bytes = 0;
43997
43998+ pax_track_stack();
43999+
44000 BUG_ON(!th->t_trans_id);
44001
44002 if (inode) { /* Do we count quotas for item? */
44003diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
44004--- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
44005+++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
44006@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
44007 {.option_name = NULL}
44008 };
44009
44010+ pax_track_stack();
44011+
44012 *blocks = 0;
44013 if (!options || !*options)
44014 /* use default configuration: create tails, journaling on, no
44015diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
44016--- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
44017+++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
44018@@ -20,6 +20,7 @@
44019 #include <linux/module.h>
44020 #include <linux/slab.h>
44021 #include <linux/poll.h>
44022+#include <linux/security.h>
44023 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
44024 #include <linux/file.h>
44025 #include <linux/fdtable.h>
44026@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
44027 int retval, i, timed_out = 0;
44028 unsigned long slack = 0;
44029
44030+ pax_track_stack();
44031+
44032 rcu_read_lock();
44033 retval = max_select_fd(n, fds);
44034 rcu_read_unlock();
44035@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
44036 /* Allocate small arguments on the stack to save memory and be faster */
44037 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
44038
44039+ pax_track_stack();
44040+
44041 ret = -EINVAL;
44042 if (n < 0)
44043 goto out_nofds;
44044@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
44045 struct poll_list *walk = head;
44046 unsigned long todo = nfds;
44047
44048+ pax_track_stack();
44049+
44050+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
44051 if (nfds > rlimit(RLIMIT_NOFILE))
44052 return -EINVAL;
44053
44054diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
44055--- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
44056+++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
44057@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
44058 return 0;
44059 }
44060 if (!m->buf) {
44061- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
44062+ m->size = PAGE_SIZE;
44063+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44064 if (!m->buf)
44065 return -ENOMEM;
44066 }
44067@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
44068 Eoverflow:
44069 m->op->stop(m, p);
44070 kfree(m->buf);
44071- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
44072+ m->size <<= 1;
44073+ m->buf = kmalloc(m->size, GFP_KERNEL);
44074 return !m->buf ? -ENOMEM : -EAGAIN;
44075 }
44076
44077@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
44078 m->version = file->f_version;
44079 /* grab buffer if we didn't have one */
44080 if (!m->buf) {
44081- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
44082+ m->size = PAGE_SIZE;
44083+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44084 if (!m->buf)
44085 goto Enomem;
44086 }
44087@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
44088 goto Fill;
44089 m->op->stop(m, p);
44090 kfree(m->buf);
44091- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
44092+ m->size <<= 1;
44093+ m->buf = kmalloc(m->size, GFP_KERNEL);
44094 if (!m->buf)
44095 goto Enomem;
44096 m->count = 0;
44097@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
44098 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
44099 void *data)
44100 {
44101- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
44102+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
44103 int res = -ENOMEM;
44104
44105 if (op) {
44106diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
44107--- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
44108+++ linux-3.0.4/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
44109@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
44110 pipe_lock(pipe);
44111
44112 for (;;) {
44113- if (!pipe->readers) {
44114+ if (!atomic_read(&pipe->readers)) {
44115 send_sig(SIGPIPE, current, 0);
44116 if (!ret)
44117 ret = -EPIPE;
44118@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
44119 do_wakeup = 0;
44120 }
44121
44122- pipe->waiting_writers++;
44123+ atomic_inc(&pipe->waiting_writers);
44124 pipe_wait(pipe);
44125- pipe->waiting_writers--;
44126+ atomic_dec(&pipe->waiting_writers);
44127 }
44128
44129 pipe_unlock(pipe);
44130@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
44131 .spd_release = spd_release_page,
44132 };
44133
44134+ pax_track_stack();
44135+
44136 if (splice_grow_spd(pipe, &spd))
44137 return -ENOMEM;
44138
44139@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
44140 old_fs = get_fs();
44141 set_fs(get_ds());
44142 /* The cast to a user pointer is valid due to the set_fs() */
44143- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
44144+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
44145 set_fs(old_fs);
44146
44147 return res;
44148@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
44149 old_fs = get_fs();
44150 set_fs(get_ds());
44151 /* The cast to a user pointer is valid due to the set_fs() */
44152- res = vfs_write(file, (const char __user *)buf, count, &pos);
44153+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
44154 set_fs(old_fs);
44155
44156 return res;
44157@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
44158 .spd_release = spd_release_page,
44159 };
44160
44161+ pax_track_stack();
44162+
44163 if (splice_grow_spd(pipe, &spd))
44164 return -ENOMEM;
44165
44166@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
44167 goto err;
44168
44169 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
44170- vec[i].iov_base = (void __user *) page_address(page);
44171+ vec[i].iov_base = (__force void __user *) page_address(page);
44172 vec[i].iov_len = this_len;
44173 spd.pages[i] = page;
44174 spd.nr_pages++;
44175@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
44176 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
44177 {
44178 while (!pipe->nrbufs) {
44179- if (!pipe->writers)
44180+ if (!atomic_read(&pipe->writers))
44181 return 0;
44182
44183- if (!pipe->waiting_writers && sd->num_spliced)
44184+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
44185 return 0;
44186
44187 if (sd->flags & SPLICE_F_NONBLOCK)
44188@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
44189 * out of the pipe right after the splice_to_pipe(). So set
44190 * PIPE_READERS appropriately.
44191 */
44192- pipe->readers = 1;
44193+ atomic_set(&pipe->readers, 1);
44194
44195 current->splice_pipe = pipe;
44196 }
44197@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
44198 };
44199 long ret;
44200
44201+ pax_track_stack();
44202+
44203 pipe = get_pipe_info(file);
44204 if (!pipe)
44205 return -EBADF;
44206@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
44207 ret = -ERESTARTSYS;
44208 break;
44209 }
44210- if (!pipe->writers)
44211+ if (!atomic_read(&pipe->writers))
44212 break;
44213- if (!pipe->waiting_writers) {
44214+ if (!atomic_read(&pipe->waiting_writers)) {
44215 if (flags & SPLICE_F_NONBLOCK) {
44216 ret = -EAGAIN;
44217 break;
44218@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
44219 pipe_lock(pipe);
44220
44221 while (pipe->nrbufs >= pipe->buffers) {
44222- if (!pipe->readers) {
44223+ if (!atomic_read(&pipe->readers)) {
44224 send_sig(SIGPIPE, current, 0);
44225 ret = -EPIPE;
44226 break;
44227@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
44228 ret = -ERESTARTSYS;
44229 break;
44230 }
44231- pipe->waiting_writers++;
44232+ atomic_inc(&pipe->waiting_writers);
44233 pipe_wait(pipe);
44234- pipe->waiting_writers--;
44235+ atomic_dec(&pipe->waiting_writers);
44236 }
44237
44238 pipe_unlock(pipe);
44239@@ -1819,14 +1825,14 @@ retry:
44240 pipe_double_lock(ipipe, opipe);
44241
44242 do {
44243- if (!opipe->readers) {
44244+ if (!atomic_read(&opipe->readers)) {
44245 send_sig(SIGPIPE, current, 0);
44246 if (!ret)
44247 ret = -EPIPE;
44248 break;
44249 }
44250
44251- if (!ipipe->nrbufs && !ipipe->writers)
44252+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
44253 break;
44254
44255 /*
44256@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
44257 pipe_double_lock(ipipe, opipe);
44258
44259 do {
44260- if (!opipe->readers) {
44261+ if (!atomic_read(&opipe->readers)) {
44262 send_sig(SIGPIPE, current, 0);
44263 if (!ret)
44264 ret = -EPIPE;
44265@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
44266 * return EAGAIN if we have the potential of some data in the
44267 * future, otherwise just return 0
44268 */
44269- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
44270+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
44271 ret = -EAGAIN;
44272
44273 pipe_unlock(ipipe);
44274diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
44275--- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
44276+++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
44277@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
44278
44279 struct sysfs_open_dirent {
44280 atomic_t refcnt;
44281- atomic_t event;
44282+ atomic_unchecked_t event;
44283 wait_queue_head_t poll;
44284 struct list_head buffers; /* goes through sysfs_buffer.list */
44285 };
44286@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
44287 if (!sysfs_get_active(attr_sd))
44288 return -ENODEV;
44289
44290- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
44291+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
44292 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
44293
44294 sysfs_put_active(attr_sd);
44295@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
44296 return -ENOMEM;
44297
44298 atomic_set(&new_od->refcnt, 0);
44299- atomic_set(&new_od->event, 1);
44300+ atomic_set_unchecked(&new_od->event, 1);
44301 init_waitqueue_head(&new_od->poll);
44302 INIT_LIST_HEAD(&new_od->buffers);
44303 goto retry;
44304@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
44305
44306 sysfs_put_active(attr_sd);
44307
44308- if (buffer->event != atomic_read(&od->event))
44309+ if (buffer->event != atomic_read_unchecked(&od->event))
44310 goto trigger;
44311
44312 return DEFAULT_POLLMASK;
44313@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
44314
44315 od = sd->s_attr.open;
44316 if (od) {
44317- atomic_inc(&od->event);
44318+ atomic_inc_unchecked(&od->event);
44319 wake_up_interruptible(&od->poll);
44320 }
44321
44322diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
44323--- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
44324+++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
44325@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
44326 .s_name = "",
44327 .s_count = ATOMIC_INIT(1),
44328 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
44329+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44330+ .s_mode = S_IFDIR | S_IRWXU,
44331+#else
44332 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44333+#endif
44334 .s_ino = 1,
44335 };
44336
44337diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
44338--- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
44339+++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
44340@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
44341
44342 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44343 {
44344- char *page = nd_get_link(nd);
44345+ const char *page = nd_get_link(nd);
44346 if (!IS_ERR(page))
44347 free_page((unsigned long)page);
44348 }
44349diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
44350--- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
44351+++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
44352@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
44353 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
44354 int lastblock = 0;
44355
44356+ pax_track_stack();
44357+
44358 prev_epos.offset = udf_file_entry_alloc_offset(inode);
44359 prev_epos.block = iinfo->i_location;
44360 prev_epos.bh = NULL;
44361diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
44362--- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
44363+++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
44364@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
44365
44366 u8 udf_tag_checksum(const struct tag *t)
44367 {
44368- u8 *data = (u8 *)t;
44369+ const u8 *data = (const u8 *)t;
44370 u8 checksum = 0;
44371 int i;
44372 for (i = 0; i < sizeof(struct tag); ++i)
44373diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
44374--- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
44375+++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
44376@@ -1,6 +1,7 @@
44377 #include <linux/compiler.h>
44378 #include <linux/file.h>
44379 #include <linux/fs.h>
44380+#include <linux/security.h>
44381 #include <linux/linkage.h>
44382 #include <linux/mount.h>
44383 #include <linux/namei.h>
44384@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
44385 goto mnt_drop_write_and_out;
44386 }
44387 }
44388+
44389+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
44390+ error = -EACCES;
44391+ goto mnt_drop_write_and_out;
44392+ }
44393+
44394 mutex_lock(&inode->i_mutex);
44395 error = notify_change(path->dentry, &newattrs);
44396 mutex_unlock(&inode->i_mutex);
44397diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
44398--- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
44399+++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
44400@@ -17,8 +17,8 @@
44401 struct posix_acl *
44402 posix_acl_from_xattr(const void *value, size_t size)
44403 {
44404- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
44405- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
44406+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
44407+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
44408 int count;
44409 struct posix_acl *acl;
44410 struct posix_acl_entry *acl_e;
44411diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
44412--- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
44413+++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
44414@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
44415 * Extended attribute SET operations
44416 */
44417 static long
44418-setxattr(struct dentry *d, const char __user *name, const void __user *value,
44419+setxattr(struct path *path, const char __user *name, const void __user *value,
44420 size_t size, int flags)
44421 {
44422 int error;
44423@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
44424 return PTR_ERR(kvalue);
44425 }
44426
44427- error = vfs_setxattr(d, kname, kvalue, size, flags);
44428+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
44429+ error = -EACCES;
44430+ goto out;
44431+ }
44432+
44433+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
44434+out:
44435 kfree(kvalue);
44436 return error;
44437 }
44438@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
44439 return error;
44440 error = mnt_want_write(path.mnt);
44441 if (!error) {
44442- error = setxattr(path.dentry, name, value, size, flags);
44443+ error = setxattr(&path, name, value, size, flags);
44444 mnt_drop_write(path.mnt);
44445 }
44446 path_put(&path);
44447@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
44448 return error;
44449 error = mnt_want_write(path.mnt);
44450 if (!error) {
44451- error = setxattr(path.dentry, name, value, size, flags);
44452+ error = setxattr(&path, name, value, size, flags);
44453 mnt_drop_write(path.mnt);
44454 }
44455 path_put(&path);
44456@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
44457 const void __user *,value, size_t, size, int, flags)
44458 {
44459 struct file *f;
44460- struct dentry *dentry;
44461 int error = -EBADF;
44462
44463 f = fget(fd);
44464 if (!f)
44465 return error;
44466- dentry = f->f_path.dentry;
44467- audit_inode(NULL, dentry);
44468+ audit_inode(NULL, f->f_path.dentry);
44469 error = mnt_want_write_file(f);
44470 if (!error) {
44471- error = setxattr(dentry, name, value, size, flags);
44472+ error = setxattr(&f->f_path, name, value, size, flags);
44473 mnt_drop_write(f->f_path.mnt);
44474 }
44475 fput(f);
44476diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
44477--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
44478+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
44479@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
44480 xfs_fsop_geom_t fsgeo;
44481 int error;
44482
44483+ memset(&fsgeo, 0, sizeof(fsgeo));
44484 error = xfs_fs_geometry(mp, &fsgeo, 3);
44485 if (error)
44486 return -error;
44487diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
44488--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
44489+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
44490@@ -128,7 +128,7 @@ xfs_find_handle(
44491 }
44492
44493 error = -EFAULT;
44494- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
44495+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
44496 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
44497 goto out_put;
44498
44499diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
44500--- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
44501+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
44502@@ -437,7 +437,7 @@ xfs_vn_put_link(
44503 struct nameidata *nd,
44504 void *p)
44505 {
44506- char *s = nd_get_link(nd);
44507+ const char *s = nd_get_link(nd);
44508
44509 if (!IS_ERR(s))
44510 kfree(s);
44511diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
44512--- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
44513+++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
44514@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
44515 int nmap,
44516 int ret_nmap);
44517 #else
44518-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
44519+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
44520 #endif /* DEBUG */
44521
44522 STATIC int
44523diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
44524--- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
44525+++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
44526@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
44527 }
44528
44529 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
44530- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
44531+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
44532+ char name[sfep->namelen];
44533+ memcpy(name, sfep->name, sfep->namelen);
44534+ if (filldir(dirent, name, sfep->namelen,
44535+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
44536+ *offset = off & 0x7fffffff;
44537+ return 0;
44538+ }
44539+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
44540 off & 0x7fffffff, ino, DT_UNKNOWN)) {
44541 *offset = off & 0x7fffffff;
44542 return 0;
44543diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
44544--- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44545+++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
44546@@ -0,0 +1,105 @@
44547+#include <linux/kernel.h>
44548+#include <linux/mm.h>
44549+#include <linux/slab.h>
44550+#include <linux/vmalloc.h>
44551+#include <linux/gracl.h>
44552+#include <linux/grsecurity.h>
44553+
44554+static unsigned long alloc_stack_next = 1;
44555+static unsigned long alloc_stack_size = 1;
44556+static void **alloc_stack;
44557+
44558+static __inline__ int
44559+alloc_pop(void)
44560+{
44561+ if (alloc_stack_next == 1)
44562+ return 0;
44563+
44564+ kfree(alloc_stack[alloc_stack_next - 2]);
44565+
44566+ alloc_stack_next--;
44567+
44568+ return 1;
44569+}
44570+
44571+static __inline__ int
44572+alloc_push(void *buf)
44573+{
44574+ if (alloc_stack_next >= alloc_stack_size)
44575+ return 1;
44576+
44577+ alloc_stack[alloc_stack_next - 1] = buf;
44578+
44579+ alloc_stack_next++;
44580+
44581+ return 0;
44582+}
44583+
44584+void *
44585+acl_alloc(unsigned long len)
44586+{
44587+ void *ret = NULL;
44588+
44589+ if (!len || len > PAGE_SIZE)
44590+ goto out;
44591+
44592+ ret = kmalloc(len, GFP_KERNEL);
44593+
44594+ if (ret) {
44595+ if (alloc_push(ret)) {
44596+ kfree(ret);
44597+ ret = NULL;
44598+ }
44599+ }
44600+
44601+out:
44602+ return ret;
44603+}
44604+
44605+void *
44606+acl_alloc_num(unsigned long num, unsigned long len)
44607+{
44608+ if (!len || (num > (PAGE_SIZE / len)))
44609+ return NULL;
44610+
44611+ return acl_alloc(num * len);
44612+}
44613+
44614+void
44615+acl_free_all(void)
44616+{
44617+ if (gr_acl_is_enabled() || !alloc_stack)
44618+ return;
44619+
44620+ while (alloc_pop()) ;
44621+
44622+ if (alloc_stack) {
44623+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44624+ kfree(alloc_stack);
44625+ else
44626+ vfree(alloc_stack);
44627+ }
44628+
44629+ alloc_stack = NULL;
44630+ alloc_stack_size = 1;
44631+ alloc_stack_next = 1;
44632+
44633+ return;
44634+}
44635+
44636+int
44637+acl_alloc_stack_init(unsigned long size)
44638+{
44639+ if ((size * sizeof (void *)) <= PAGE_SIZE)
44640+ alloc_stack =
44641+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44642+ else
44643+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
44644+
44645+ alloc_stack_size = size;
44646+
44647+ if (!alloc_stack)
44648+ return 0;
44649+ else
44650+ return 1;
44651+}
44652diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
44653--- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44654+++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
44655@@ -0,0 +1,4106 @@
44656+#include <linux/kernel.h>
44657+#include <linux/module.h>
44658+#include <linux/sched.h>
44659+#include <linux/mm.h>
44660+#include <linux/file.h>
44661+#include <linux/fs.h>
44662+#include <linux/namei.h>
44663+#include <linux/mount.h>
44664+#include <linux/tty.h>
44665+#include <linux/proc_fs.h>
44666+#include <linux/lglock.h>
44667+#include <linux/slab.h>
44668+#include <linux/vmalloc.h>
44669+#include <linux/types.h>
44670+#include <linux/sysctl.h>
44671+#include <linux/netdevice.h>
44672+#include <linux/ptrace.h>
44673+#include <linux/gracl.h>
44674+#include <linux/gralloc.h>
44675+#include <linux/grsecurity.h>
44676+#include <linux/grinternal.h>
44677+#include <linux/pid_namespace.h>
44678+#include <linux/fdtable.h>
44679+#include <linux/percpu.h>
44680+
44681+#include <asm/uaccess.h>
44682+#include <asm/errno.h>
44683+#include <asm/mman.h>
44684+
44685+static struct acl_role_db acl_role_set;
44686+static struct name_db name_set;
44687+static struct inodev_db inodev_set;
44688+
44689+/* for keeping track of userspace pointers used for subjects, so we
44690+ can share references in the kernel as well
44691+*/
44692+
44693+static struct path real_root;
44694+
44695+static struct acl_subj_map_db subj_map_set;
44696+
44697+static struct acl_role_label *default_role;
44698+
44699+static struct acl_role_label *role_list;
44700+
44701+static u16 acl_sp_role_value;
44702+
44703+extern char *gr_shared_page[4];
44704+static DEFINE_MUTEX(gr_dev_mutex);
44705+DEFINE_RWLOCK(gr_inode_lock);
44706+
44707+struct gr_arg *gr_usermode;
44708+
44709+static unsigned int gr_status __read_only = GR_STATUS_INIT;
44710+
44711+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44712+extern void gr_clear_learn_entries(void);
44713+
44714+#ifdef CONFIG_GRKERNSEC_RESLOG
44715+extern void gr_log_resource(const struct task_struct *task,
44716+ const int res, const unsigned long wanted, const int gt);
44717+#endif
44718+
44719+unsigned char *gr_system_salt;
44720+unsigned char *gr_system_sum;
44721+
44722+static struct sprole_pw **acl_special_roles = NULL;
44723+static __u16 num_sprole_pws = 0;
44724+
44725+static struct acl_role_label *kernel_role = NULL;
44726+
44727+static unsigned int gr_auth_attempts = 0;
44728+static unsigned long gr_auth_expires = 0UL;
44729+
44730+#ifdef CONFIG_NET
44731+extern struct vfsmount *sock_mnt;
44732+#endif
44733+
44734+extern struct vfsmount *pipe_mnt;
44735+extern struct vfsmount *shm_mnt;
44736+#ifdef CONFIG_HUGETLBFS
44737+extern struct vfsmount *hugetlbfs_vfsmount;
44738+#endif
44739+
44740+static struct acl_object_label *fakefs_obj_rw;
44741+static struct acl_object_label *fakefs_obj_rwx;
44742+
44743+extern int gr_init_uidset(void);
44744+extern void gr_free_uidset(void);
44745+extern void gr_remove_uid(uid_t uid);
44746+extern int gr_find_uid(uid_t uid);
44747+
44748+DECLARE_BRLOCK(vfsmount_lock);
44749+
44750+__inline__ int
44751+gr_acl_is_enabled(void)
44752+{
44753+ return (gr_status & GR_READY);
44754+}
44755+
44756+#ifdef CONFIG_BTRFS_FS
44757+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44758+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44759+#endif
44760+
44761+static inline dev_t __get_dev(const struct dentry *dentry)
44762+{
44763+#ifdef CONFIG_BTRFS_FS
44764+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44765+ return get_btrfs_dev_from_inode(dentry->d_inode);
44766+ else
44767+#endif
44768+ return dentry->d_inode->i_sb->s_dev;
44769+}
44770+
44771+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44772+{
44773+ return __get_dev(dentry);
44774+}
44775+
44776+static char gr_task_roletype_to_char(struct task_struct *task)
44777+{
44778+ switch (task->role->roletype &
44779+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44780+ GR_ROLE_SPECIAL)) {
44781+ case GR_ROLE_DEFAULT:
44782+ return 'D';
44783+ case GR_ROLE_USER:
44784+ return 'U';
44785+ case GR_ROLE_GROUP:
44786+ return 'G';
44787+ case GR_ROLE_SPECIAL:
44788+ return 'S';
44789+ }
44790+
44791+ return 'X';
44792+}
44793+
44794+char gr_roletype_to_char(void)
44795+{
44796+ return gr_task_roletype_to_char(current);
44797+}
44798+
44799+__inline__ int
44800+gr_acl_tpe_check(void)
44801+{
44802+ if (unlikely(!(gr_status & GR_READY)))
44803+ return 0;
44804+ if (current->role->roletype & GR_ROLE_TPE)
44805+ return 1;
44806+ else
44807+ return 0;
44808+}
44809+
44810+int
44811+gr_handle_rawio(const struct inode *inode)
44812+{
44813+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44814+ if (inode && S_ISBLK(inode->i_mode) &&
44815+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44816+ !capable(CAP_SYS_RAWIO))
44817+ return 1;
44818+#endif
44819+ return 0;
44820+}
44821+
44822+static int
44823+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44824+{
44825+ if (likely(lena != lenb))
44826+ return 0;
44827+
44828+ return !memcmp(a, b, lena);
44829+}
44830+
44831+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
44832+{
44833+ *buflen -= namelen;
44834+ if (*buflen < 0)
44835+ return -ENAMETOOLONG;
44836+ *buffer -= namelen;
44837+ memcpy(*buffer, str, namelen);
44838+ return 0;
44839+}
44840+
44841+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
44842+{
44843+ return prepend(buffer, buflen, name->name, name->len);
44844+}
44845+
44846+static int prepend_path(const struct path *path, struct path *root,
44847+ char **buffer, int *buflen)
44848+{
44849+ struct dentry *dentry = path->dentry;
44850+ struct vfsmount *vfsmnt = path->mnt;
44851+ bool slash = false;
44852+ int error = 0;
44853+
44854+ while (dentry != root->dentry || vfsmnt != root->mnt) {
44855+ struct dentry * parent;
44856+
44857+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44858+ /* Global root? */
44859+ if (vfsmnt->mnt_parent == vfsmnt) {
44860+ goto out;
44861+ }
44862+ dentry = vfsmnt->mnt_mountpoint;
44863+ vfsmnt = vfsmnt->mnt_parent;
44864+ continue;
44865+ }
44866+ parent = dentry->d_parent;
44867+ prefetch(parent);
44868+ spin_lock(&dentry->d_lock);
44869+ error = prepend_name(buffer, buflen, &dentry->d_name);
44870+ spin_unlock(&dentry->d_lock);
44871+ if (!error)
44872+ error = prepend(buffer, buflen, "/", 1);
44873+ if (error)
44874+ break;
44875+
44876+ slash = true;
44877+ dentry = parent;
44878+ }
44879+
44880+out:
44881+ if (!error && !slash)
44882+ error = prepend(buffer, buflen, "/", 1);
44883+
44884+ return error;
44885+}
44886+
44887+/* this must be called with vfsmount_lock and rename_lock held */
44888+
44889+static char *__our_d_path(const struct path *path, struct path *root,
44890+ char *buf, int buflen)
44891+{
44892+ char *res = buf + buflen;
44893+ int error;
44894+
44895+ prepend(&res, &buflen, "\0", 1);
44896+ error = prepend_path(path, root, &res, &buflen);
44897+ if (error)
44898+ return ERR_PTR(error);
44899+
44900+ return res;
44901+}
44902+
44903+static char *
44904+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
44905+{
44906+ char *retval;
44907+
44908+ retval = __our_d_path(path, root, buf, buflen);
44909+ if (unlikely(IS_ERR(retval)))
44910+ retval = strcpy(buf, "<path too long>");
44911+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44912+ retval[1] = '\0';
44913+
44914+ return retval;
44915+}
44916+
44917+static char *
44918+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44919+ char *buf, int buflen)
44920+{
44921+ struct path path;
44922+ char *res;
44923+
44924+ path.dentry = (struct dentry *)dentry;
44925+ path.mnt = (struct vfsmount *)vfsmnt;
44926+
44927+ /* we can use real_root.dentry, real_root.mnt, because this is only called
44928+ by the RBAC system */
44929+ res = gen_full_path(&path, &real_root, buf, buflen);
44930+
44931+ return res;
44932+}
44933+
44934+static char *
44935+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44936+ char *buf, int buflen)
44937+{
44938+ char *res;
44939+ struct path path;
44940+ struct path root;
44941+ struct task_struct *reaper = &init_task;
44942+
44943+ path.dentry = (struct dentry *)dentry;
44944+ path.mnt = (struct vfsmount *)vfsmnt;
44945+
44946+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
44947+ get_fs_root(reaper->fs, &root);
44948+
44949+ write_seqlock(&rename_lock);
44950+ br_read_lock(vfsmount_lock);
44951+ res = gen_full_path(&path, &root, buf, buflen);
44952+ br_read_unlock(vfsmount_lock);
44953+ write_sequnlock(&rename_lock);
44954+
44955+ path_put(&root);
44956+ return res;
44957+}
44958+
44959+static char *
44960+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44961+{
44962+ char *ret;
44963+ write_seqlock(&rename_lock);
44964+ br_read_lock(vfsmount_lock);
44965+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44966+ PAGE_SIZE);
44967+ br_read_unlock(vfsmount_lock);
44968+ write_sequnlock(&rename_lock);
44969+ return ret;
44970+}
44971+
44972+char *
44973+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44974+{
44975+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44976+ PAGE_SIZE);
44977+}
44978+
44979+char *
44980+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44981+{
44982+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44983+ PAGE_SIZE);
44984+}
44985+
44986+char *
44987+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44988+{
44989+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44990+ PAGE_SIZE);
44991+}
44992+
44993+char *
44994+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44995+{
44996+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44997+ PAGE_SIZE);
44998+}
44999+
45000+char *
45001+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
45002+{
45003+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
45004+ PAGE_SIZE);
45005+}
45006+
45007+__inline__ __u32
45008+to_gr_audit(const __u32 reqmode)
45009+{
45010+ /* masks off auditable permission flags, then shifts them to create
45011+ auditing flags, and adds the special case of append auditing if
45012+ we're requesting write */
45013+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
45014+}
45015+
45016+struct acl_subject_label *
45017+lookup_subject_map(const struct acl_subject_label *userp)
45018+{
45019+ unsigned int index = shash(userp, subj_map_set.s_size);
45020+ struct subject_map *match;
45021+
45022+ match = subj_map_set.s_hash[index];
45023+
45024+ while (match && match->user != userp)
45025+ match = match->next;
45026+
45027+ if (match != NULL)
45028+ return match->kernel;
45029+ else
45030+ return NULL;
45031+}
45032+
45033+static void
45034+insert_subj_map_entry(struct subject_map *subjmap)
45035+{
45036+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
45037+ struct subject_map **curr;
45038+
45039+ subjmap->prev = NULL;
45040+
45041+ curr = &subj_map_set.s_hash[index];
45042+ if (*curr != NULL)
45043+ (*curr)->prev = subjmap;
45044+
45045+ subjmap->next = *curr;
45046+ *curr = subjmap;
45047+
45048+ return;
45049+}
45050+
45051+static struct acl_role_label *
45052+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
45053+ const gid_t gid)
45054+{
45055+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
45056+ struct acl_role_label *match;
45057+ struct role_allowed_ip *ipp;
45058+ unsigned int x;
45059+ u32 curr_ip = task->signal->curr_ip;
45060+
45061+ task->signal->saved_ip = curr_ip;
45062+
45063+ match = acl_role_set.r_hash[index];
45064+
45065+ while (match) {
45066+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
45067+ for (x = 0; x < match->domain_child_num; x++) {
45068+ if (match->domain_children[x] == uid)
45069+ goto found;
45070+ }
45071+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
45072+ break;
45073+ match = match->next;
45074+ }
45075+found:
45076+ if (match == NULL) {
45077+ try_group:
45078+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
45079+ match = acl_role_set.r_hash[index];
45080+
45081+ while (match) {
45082+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
45083+ for (x = 0; x < match->domain_child_num; x++) {
45084+ if (match->domain_children[x] == gid)
45085+ goto found2;
45086+ }
45087+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
45088+ break;
45089+ match = match->next;
45090+ }
45091+found2:
45092+ if (match == NULL)
45093+ match = default_role;
45094+ if (match->allowed_ips == NULL)
45095+ return match;
45096+ else {
45097+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
45098+ if (likely
45099+ ((ntohl(curr_ip) & ipp->netmask) ==
45100+ (ntohl(ipp->addr) & ipp->netmask)))
45101+ return match;
45102+ }
45103+ match = default_role;
45104+ }
45105+ } else if (match->allowed_ips == NULL) {
45106+ return match;
45107+ } else {
45108+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
45109+ if (likely
45110+ ((ntohl(curr_ip) & ipp->netmask) ==
45111+ (ntohl(ipp->addr) & ipp->netmask)))
45112+ return match;
45113+ }
45114+ goto try_group;
45115+ }
45116+
45117+ return match;
45118+}
45119+
45120+struct acl_subject_label *
45121+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
45122+ const struct acl_role_label *role)
45123+{
45124+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
45125+ struct acl_subject_label *match;
45126+
45127+ match = role->subj_hash[index];
45128+
45129+ while (match && (match->inode != ino || match->device != dev ||
45130+ (match->mode & GR_DELETED))) {
45131+ match = match->next;
45132+ }
45133+
45134+ if (match && !(match->mode & GR_DELETED))
45135+ return match;
45136+ else
45137+ return NULL;
45138+}
45139+
45140+struct acl_subject_label *
45141+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
45142+ const struct acl_role_label *role)
45143+{
45144+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
45145+ struct acl_subject_label *match;
45146+
45147+ match = role->subj_hash[index];
45148+
45149+ while (match && (match->inode != ino || match->device != dev ||
45150+ !(match->mode & GR_DELETED))) {
45151+ match = match->next;
45152+ }
45153+
45154+ if (match && (match->mode & GR_DELETED))
45155+ return match;
45156+ else
45157+ return NULL;
45158+}
45159+
45160+static struct acl_object_label *
45161+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
45162+ const struct acl_subject_label *subj)
45163+{
45164+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45165+ struct acl_object_label *match;
45166+
45167+ match = subj->obj_hash[index];
45168+
45169+ while (match && (match->inode != ino || match->device != dev ||
45170+ (match->mode & GR_DELETED))) {
45171+ match = match->next;
45172+ }
45173+
45174+ if (match && !(match->mode & GR_DELETED))
45175+ return match;
45176+ else
45177+ return NULL;
45178+}
45179+
45180+static struct acl_object_label *
45181+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
45182+ const struct acl_subject_label *subj)
45183+{
45184+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45185+ struct acl_object_label *match;
45186+
45187+ match = subj->obj_hash[index];
45188+
45189+ while (match && (match->inode != ino || match->device != dev ||
45190+ !(match->mode & GR_DELETED))) {
45191+ match = match->next;
45192+ }
45193+
45194+ if (match && (match->mode & GR_DELETED))
45195+ return match;
45196+
45197+ match = subj->obj_hash[index];
45198+
45199+ while (match && (match->inode != ino || match->device != dev ||
45200+ (match->mode & GR_DELETED))) {
45201+ match = match->next;
45202+ }
45203+
45204+ if (match && !(match->mode & GR_DELETED))
45205+ return match;
45206+ else
45207+ return NULL;
45208+}
45209+
45210+static struct name_entry *
45211+lookup_name_entry(const char *name)
45212+{
45213+ unsigned int len = strlen(name);
45214+ unsigned int key = full_name_hash(name, len);
45215+ unsigned int index = key % name_set.n_size;
45216+ struct name_entry *match;
45217+
45218+ match = name_set.n_hash[index];
45219+
45220+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
45221+ match = match->next;
45222+
45223+ return match;
45224+}
45225+
45226+static struct name_entry *
45227+lookup_name_entry_create(const char *name)
45228+{
45229+ unsigned int len = strlen(name);
45230+ unsigned int key = full_name_hash(name, len);
45231+ unsigned int index = key % name_set.n_size;
45232+ struct name_entry *match;
45233+
45234+ match = name_set.n_hash[index];
45235+
45236+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45237+ !match->deleted))
45238+ match = match->next;
45239+
45240+ if (match && match->deleted)
45241+ return match;
45242+
45243+ match = name_set.n_hash[index];
45244+
45245+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45246+ match->deleted))
45247+ match = match->next;
45248+
45249+ if (match && !match->deleted)
45250+ return match;
45251+ else
45252+ return NULL;
45253+}
45254+
45255+static struct inodev_entry *
45256+lookup_inodev_entry(const ino_t ino, const dev_t dev)
45257+{
45258+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
45259+ struct inodev_entry *match;
45260+
45261+ match = inodev_set.i_hash[index];
45262+
45263+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
45264+ match = match->next;
45265+
45266+ return match;
45267+}
45268+
45269+static void
45270+insert_inodev_entry(struct inodev_entry *entry)
45271+{
45272+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
45273+ inodev_set.i_size);
45274+ struct inodev_entry **curr;
45275+
45276+ entry->prev = NULL;
45277+
45278+ curr = &inodev_set.i_hash[index];
45279+ if (*curr != NULL)
45280+ (*curr)->prev = entry;
45281+
45282+ entry->next = *curr;
45283+ *curr = entry;
45284+
45285+ return;
45286+}
45287+
45288+static void
45289+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
45290+{
45291+ unsigned int index =
45292+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
45293+ struct acl_role_label **curr;
45294+ struct acl_role_label *tmp;
45295+
45296+ curr = &acl_role_set.r_hash[index];
45297+
45298+ /* if role was already inserted due to domains and already has
45299+ a role in the same bucket as it attached, then we need to
45300+ combine these two buckets
45301+ */
45302+ if (role->next) {
45303+ tmp = role->next;
45304+ while (tmp->next)
45305+ tmp = tmp->next;
45306+ tmp->next = *curr;
45307+ } else
45308+ role->next = *curr;
45309+ *curr = role;
45310+
45311+ return;
45312+}
45313+
45314+static void
45315+insert_acl_role_label(struct acl_role_label *role)
45316+{
45317+ int i;
45318+
45319+ if (role_list == NULL) {
45320+ role_list = role;
45321+ role->prev = NULL;
45322+ } else {
45323+ role->prev = role_list;
45324+ role_list = role;
45325+ }
45326+
45327+ /* used for hash chains */
45328+ role->next = NULL;
45329+
45330+ if (role->roletype & GR_ROLE_DOMAIN) {
45331+ for (i = 0; i < role->domain_child_num; i++)
45332+ __insert_acl_role_label(role, role->domain_children[i]);
45333+ } else
45334+ __insert_acl_role_label(role, role->uidgid);
45335+}
45336+
45337+static int
45338+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
45339+{
45340+ struct name_entry **curr, *nentry;
45341+ struct inodev_entry *ientry;
45342+ unsigned int len = strlen(name);
45343+ unsigned int key = full_name_hash(name, len);
45344+ unsigned int index = key % name_set.n_size;
45345+
45346+ curr = &name_set.n_hash[index];
45347+
45348+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
45349+ curr = &((*curr)->next);
45350+
45351+ if (*curr != NULL)
45352+ return 1;
45353+
45354+ nentry = acl_alloc(sizeof (struct name_entry));
45355+ if (nentry == NULL)
45356+ return 0;
45357+ ientry = acl_alloc(sizeof (struct inodev_entry));
45358+ if (ientry == NULL)
45359+ return 0;
45360+ ientry->nentry = nentry;
45361+
45362+ nentry->key = key;
45363+ nentry->name = name;
45364+ nentry->inode = inode;
45365+ nentry->device = device;
45366+ nentry->len = len;
45367+ nentry->deleted = deleted;
45368+
45369+ nentry->prev = NULL;
45370+ curr = &name_set.n_hash[index];
45371+ if (*curr != NULL)
45372+ (*curr)->prev = nentry;
45373+ nentry->next = *curr;
45374+ *curr = nentry;
45375+
45376+ /* insert us into the table searchable by inode/dev */
45377+ insert_inodev_entry(ientry);
45378+
45379+ return 1;
45380+}
45381+
45382+static void
45383+insert_acl_obj_label(struct acl_object_label *obj,
45384+ struct acl_subject_label *subj)
45385+{
45386+ unsigned int index =
45387+ fhash(obj->inode, obj->device, subj->obj_hash_size);
45388+ struct acl_object_label **curr;
45389+
45390+
45391+ obj->prev = NULL;
45392+
45393+ curr = &subj->obj_hash[index];
45394+ if (*curr != NULL)
45395+ (*curr)->prev = obj;
45396+
45397+ obj->next = *curr;
45398+ *curr = obj;
45399+
45400+ return;
45401+}
45402+
45403+static void
45404+insert_acl_subj_label(struct acl_subject_label *obj,
45405+ struct acl_role_label *role)
45406+{
45407+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
45408+ struct acl_subject_label **curr;
45409+
45410+ obj->prev = NULL;
45411+
45412+ curr = &role->subj_hash[index];
45413+ if (*curr != NULL)
45414+ (*curr)->prev = obj;
45415+
45416+ obj->next = *curr;
45417+ *curr = obj;
45418+
45419+ return;
45420+}
45421+
45422+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
45423+
45424+static void *
45425+create_table(__u32 * len, int elementsize)
45426+{
45427+ unsigned int table_sizes[] = {
45428+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
45429+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
45430+ 4194301, 8388593, 16777213, 33554393, 67108859
45431+ };
45432+ void *newtable = NULL;
45433+ unsigned int pwr = 0;
45434+
45435+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
45436+ table_sizes[pwr] <= *len)
45437+ pwr++;
45438+
45439+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
45440+ return newtable;
45441+
45442+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
45443+ newtable =
45444+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
45445+ else
45446+ newtable = vmalloc(table_sizes[pwr] * elementsize);
45447+
45448+ *len = table_sizes[pwr];
45449+
45450+ return newtable;
45451+}
45452+
45453+static int
45454+init_variables(const struct gr_arg *arg)
45455+{
45456+ struct task_struct *reaper = &init_task;
45457+ unsigned int stacksize;
45458+
45459+ subj_map_set.s_size = arg->role_db.num_subjects;
45460+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
45461+ name_set.n_size = arg->role_db.num_objects;
45462+ inodev_set.i_size = arg->role_db.num_objects;
45463+
45464+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
45465+ !name_set.n_size || !inodev_set.i_size)
45466+ return 1;
45467+
45468+ if (!gr_init_uidset())
45469+ return 1;
45470+
45471+ /* set up the stack that holds allocation info */
45472+
45473+ stacksize = arg->role_db.num_pointers + 5;
45474+
45475+ if (!acl_alloc_stack_init(stacksize))
45476+ return 1;
45477+
45478+ /* grab reference for the real root dentry and vfsmount */
45479+ get_fs_root(reaper->fs, &real_root);
45480+
45481+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45482+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
45483+#endif
45484+
45485+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
45486+ if (fakefs_obj_rw == NULL)
45487+ return 1;
45488+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
45489+
45490+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
45491+ if (fakefs_obj_rwx == NULL)
45492+ return 1;
45493+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
45494+
45495+ subj_map_set.s_hash =
45496+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
45497+ acl_role_set.r_hash =
45498+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
45499+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
45500+ inodev_set.i_hash =
45501+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
45502+
45503+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
45504+ !name_set.n_hash || !inodev_set.i_hash)
45505+ return 1;
45506+
45507+ memset(subj_map_set.s_hash, 0,
45508+ sizeof(struct subject_map *) * subj_map_set.s_size);
45509+ memset(acl_role_set.r_hash, 0,
45510+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
45511+ memset(name_set.n_hash, 0,
45512+ sizeof (struct name_entry *) * name_set.n_size);
45513+ memset(inodev_set.i_hash, 0,
45514+ sizeof (struct inodev_entry *) * inodev_set.i_size);
45515+
45516+ return 0;
45517+}
45518+
45519+/* free information not needed after startup
45520+ currently contains user->kernel pointer mappings for subjects
45521+*/
45522+
45523+static void
45524+free_init_variables(void)
45525+{
45526+ __u32 i;
45527+
45528+ if (subj_map_set.s_hash) {
45529+ for (i = 0; i < subj_map_set.s_size; i++) {
45530+ if (subj_map_set.s_hash[i]) {
45531+ kfree(subj_map_set.s_hash[i]);
45532+ subj_map_set.s_hash[i] = NULL;
45533+ }
45534+ }
45535+
45536+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
45537+ PAGE_SIZE)
45538+ kfree(subj_map_set.s_hash);
45539+ else
45540+ vfree(subj_map_set.s_hash);
45541+ }
45542+
45543+ return;
45544+}
45545+
45546+static void
45547+free_variables(void)
45548+{
45549+ struct acl_subject_label *s;
45550+ struct acl_role_label *r;
45551+ struct task_struct *task, *task2;
45552+ unsigned int x;
45553+
45554+ gr_clear_learn_entries();
45555+
45556+ read_lock(&tasklist_lock);
45557+ do_each_thread(task2, task) {
45558+ task->acl_sp_role = 0;
45559+ task->acl_role_id = 0;
45560+ task->acl = NULL;
45561+ task->role = NULL;
45562+ } while_each_thread(task2, task);
45563+ read_unlock(&tasklist_lock);
45564+
45565+ /* release the reference to the real root dentry and vfsmount */
45566+ path_put(&real_root);
45567+
45568+ /* free all object hash tables */
45569+
45570+ FOR_EACH_ROLE_START(r)
45571+ if (r->subj_hash == NULL)
45572+ goto next_role;
45573+ FOR_EACH_SUBJECT_START(r, s, x)
45574+ if (s->obj_hash == NULL)
45575+ break;
45576+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45577+ kfree(s->obj_hash);
45578+ else
45579+ vfree(s->obj_hash);
45580+ FOR_EACH_SUBJECT_END(s, x)
45581+ FOR_EACH_NESTED_SUBJECT_START(r, s)
45582+ if (s->obj_hash == NULL)
45583+ break;
45584+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45585+ kfree(s->obj_hash);
45586+ else
45587+ vfree(s->obj_hash);
45588+ FOR_EACH_NESTED_SUBJECT_END(s)
45589+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45590+ kfree(r->subj_hash);
45591+ else
45592+ vfree(r->subj_hash);
45593+ r->subj_hash = NULL;
45594+next_role:
45595+ FOR_EACH_ROLE_END(r)
45596+
45597+ acl_free_all();
45598+
45599+ if (acl_role_set.r_hash) {
45600+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45601+ PAGE_SIZE)
45602+ kfree(acl_role_set.r_hash);
45603+ else
45604+ vfree(acl_role_set.r_hash);
45605+ }
45606+ if (name_set.n_hash) {
45607+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
45608+ PAGE_SIZE)
45609+ kfree(name_set.n_hash);
45610+ else
45611+ vfree(name_set.n_hash);
45612+ }
45613+
45614+ if (inodev_set.i_hash) {
45615+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45616+ PAGE_SIZE)
45617+ kfree(inodev_set.i_hash);
45618+ else
45619+ vfree(inodev_set.i_hash);
45620+ }
45621+
45622+ gr_free_uidset();
45623+
45624+ memset(&name_set, 0, sizeof (struct name_db));
45625+ memset(&inodev_set, 0, sizeof (struct inodev_db));
45626+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45627+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45628+
45629+ default_role = NULL;
45630+ role_list = NULL;
45631+
45632+ return;
45633+}
45634+
45635+static __u32
45636+count_user_objs(struct acl_object_label *userp)
45637+{
45638+ struct acl_object_label o_tmp;
45639+ __u32 num = 0;
45640+
45641+ while (userp) {
45642+ if (copy_from_user(&o_tmp, userp,
45643+ sizeof (struct acl_object_label)))
45644+ break;
45645+
45646+ userp = o_tmp.prev;
45647+ num++;
45648+ }
45649+
45650+ return num;
45651+}
45652+
45653+static struct acl_subject_label *
45654+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45655+
45656+static int
45657+copy_user_glob(struct acl_object_label *obj)
45658+{
45659+ struct acl_object_label *g_tmp, **guser;
45660+ unsigned int len;
45661+ char *tmp;
45662+
45663+ if (obj->globbed == NULL)
45664+ return 0;
45665+
45666+ guser = &obj->globbed;
45667+ while (*guser) {
45668+ g_tmp = (struct acl_object_label *)
45669+ acl_alloc(sizeof (struct acl_object_label));
45670+ if (g_tmp == NULL)
45671+ return -ENOMEM;
45672+
45673+ if (copy_from_user(g_tmp, *guser,
45674+ sizeof (struct acl_object_label)))
45675+ return -EFAULT;
45676+
45677+ len = strnlen_user(g_tmp->filename, PATH_MAX);
45678+
45679+ if (!len || len >= PATH_MAX)
45680+ return -EINVAL;
45681+
45682+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45683+ return -ENOMEM;
45684+
45685+ if (copy_from_user(tmp, g_tmp->filename, len))
45686+ return -EFAULT;
45687+ tmp[len-1] = '\0';
45688+ g_tmp->filename = tmp;
45689+
45690+ *guser = g_tmp;
45691+ guser = &(g_tmp->next);
45692+ }
45693+
45694+ return 0;
45695+}
45696+
45697+static int
45698+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45699+ struct acl_role_label *role)
45700+{
45701+ struct acl_object_label *o_tmp;
45702+ unsigned int len;
45703+ int ret;
45704+ char *tmp;
45705+
45706+ while (userp) {
45707+ if ((o_tmp = (struct acl_object_label *)
45708+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
45709+ return -ENOMEM;
45710+
45711+ if (copy_from_user(o_tmp, userp,
45712+ sizeof (struct acl_object_label)))
45713+ return -EFAULT;
45714+
45715+ userp = o_tmp->prev;
45716+
45717+ len = strnlen_user(o_tmp->filename, PATH_MAX);
45718+
45719+ if (!len || len >= PATH_MAX)
45720+ return -EINVAL;
45721+
45722+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45723+ return -ENOMEM;
45724+
45725+ if (copy_from_user(tmp, o_tmp->filename, len))
45726+ return -EFAULT;
45727+ tmp[len-1] = '\0';
45728+ o_tmp->filename = tmp;
45729+
45730+ insert_acl_obj_label(o_tmp, subj);
45731+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45732+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45733+ return -ENOMEM;
45734+
45735+ ret = copy_user_glob(o_tmp);
45736+ if (ret)
45737+ return ret;
45738+
45739+ if (o_tmp->nested) {
45740+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45741+ if (IS_ERR(o_tmp->nested))
45742+ return PTR_ERR(o_tmp->nested);
45743+
45744+ /* insert into nested subject list */
45745+ o_tmp->nested->next = role->hash->first;
45746+ role->hash->first = o_tmp->nested;
45747+ }
45748+ }
45749+
45750+ return 0;
45751+}
45752+
45753+static __u32
45754+count_user_subjs(struct acl_subject_label *userp)
45755+{
45756+ struct acl_subject_label s_tmp;
45757+ __u32 num = 0;
45758+
45759+ while (userp) {
45760+ if (copy_from_user(&s_tmp, userp,
45761+ sizeof (struct acl_subject_label)))
45762+ break;
45763+
45764+ userp = s_tmp.prev;
45765+ /* do not count nested subjects against this count, since
45766+ they are not included in the hash table, but are
45767+ attached to objects. We have already counted
45768+ the subjects in userspace for the allocation
45769+ stack
45770+ */
45771+ if (!(s_tmp.mode & GR_NESTED))
45772+ num++;
45773+ }
45774+
45775+ return num;
45776+}
45777+
45778+static int
45779+copy_user_allowedips(struct acl_role_label *rolep)
45780+{
45781+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45782+
45783+ ruserip = rolep->allowed_ips;
45784+
45785+ while (ruserip) {
45786+ rlast = rtmp;
45787+
45788+ if ((rtmp = (struct role_allowed_ip *)
45789+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45790+ return -ENOMEM;
45791+
45792+ if (copy_from_user(rtmp, ruserip,
45793+ sizeof (struct role_allowed_ip)))
45794+ return -EFAULT;
45795+
45796+ ruserip = rtmp->prev;
45797+
45798+ if (!rlast) {
45799+ rtmp->prev = NULL;
45800+ rolep->allowed_ips = rtmp;
45801+ } else {
45802+ rlast->next = rtmp;
45803+ rtmp->prev = rlast;
45804+ }
45805+
45806+ if (!ruserip)
45807+ rtmp->next = NULL;
45808+ }
45809+
45810+ return 0;
45811+}
45812+
45813+static int
45814+copy_user_transitions(struct acl_role_label *rolep)
45815+{
45816+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
45817+
45818+ unsigned int len;
45819+ char *tmp;
45820+
45821+ rusertp = rolep->transitions;
45822+
45823+ while (rusertp) {
45824+ rlast = rtmp;
45825+
45826+ if ((rtmp = (struct role_transition *)
45827+ acl_alloc(sizeof (struct role_transition))) == NULL)
45828+ return -ENOMEM;
45829+
45830+ if (copy_from_user(rtmp, rusertp,
45831+ sizeof (struct role_transition)))
45832+ return -EFAULT;
45833+
45834+ rusertp = rtmp->prev;
45835+
45836+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45837+
45838+ if (!len || len >= GR_SPROLE_LEN)
45839+ return -EINVAL;
45840+
45841+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45842+ return -ENOMEM;
45843+
45844+ if (copy_from_user(tmp, rtmp->rolename, len))
45845+ return -EFAULT;
45846+ tmp[len-1] = '\0';
45847+ rtmp->rolename = tmp;
45848+
45849+ if (!rlast) {
45850+ rtmp->prev = NULL;
45851+ rolep->transitions = rtmp;
45852+ } else {
45853+ rlast->next = rtmp;
45854+ rtmp->prev = rlast;
45855+ }
45856+
45857+ if (!rusertp)
45858+ rtmp->next = NULL;
45859+ }
45860+
45861+ return 0;
45862+}
45863+
45864+static struct acl_subject_label *
45865+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45866+{
45867+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45868+ unsigned int len;
45869+ char *tmp;
45870+ __u32 num_objs;
45871+ struct acl_ip_label **i_tmp, *i_utmp2;
45872+ struct gr_hash_struct ghash;
45873+ struct subject_map *subjmap;
45874+ unsigned int i_num;
45875+ int err;
45876+
45877+ s_tmp = lookup_subject_map(userp);
45878+
45879+ /* we've already copied this subject into the kernel, just return
45880+ the reference to it, and don't copy it over again
45881+ */
45882+ if (s_tmp)
45883+ return(s_tmp);
45884+
45885+ if ((s_tmp = (struct acl_subject_label *)
45886+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45887+ return ERR_PTR(-ENOMEM);
45888+
45889+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45890+ if (subjmap == NULL)
45891+ return ERR_PTR(-ENOMEM);
45892+
45893+ subjmap->user = userp;
45894+ subjmap->kernel = s_tmp;
45895+ insert_subj_map_entry(subjmap);
45896+
45897+ if (copy_from_user(s_tmp, userp,
45898+ sizeof (struct acl_subject_label)))
45899+ return ERR_PTR(-EFAULT);
45900+
45901+ len = strnlen_user(s_tmp->filename, PATH_MAX);
45902+
45903+ if (!len || len >= PATH_MAX)
45904+ return ERR_PTR(-EINVAL);
45905+
45906+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45907+ return ERR_PTR(-ENOMEM);
45908+
45909+ if (copy_from_user(tmp, s_tmp->filename, len))
45910+ return ERR_PTR(-EFAULT);
45911+ tmp[len-1] = '\0';
45912+ s_tmp->filename = tmp;
45913+
45914+ if (!strcmp(s_tmp->filename, "/"))
45915+ role->root_label = s_tmp;
45916+
45917+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45918+ return ERR_PTR(-EFAULT);
45919+
45920+ /* copy user and group transition tables */
45921+
45922+ if (s_tmp->user_trans_num) {
45923+ uid_t *uidlist;
45924+
45925+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45926+ if (uidlist == NULL)
45927+ return ERR_PTR(-ENOMEM);
45928+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45929+ return ERR_PTR(-EFAULT);
45930+
45931+ s_tmp->user_transitions = uidlist;
45932+ }
45933+
45934+ if (s_tmp->group_trans_num) {
45935+ gid_t *gidlist;
45936+
45937+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45938+ if (gidlist == NULL)
45939+ return ERR_PTR(-ENOMEM);
45940+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45941+ return ERR_PTR(-EFAULT);
45942+
45943+ s_tmp->group_transitions = gidlist;
45944+ }
45945+
45946+ /* set up object hash table */
45947+ num_objs = count_user_objs(ghash.first);
45948+
45949+ s_tmp->obj_hash_size = num_objs;
45950+ s_tmp->obj_hash =
45951+ (struct acl_object_label **)
45952+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45953+
45954+ if (!s_tmp->obj_hash)
45955+ return ERR_PTR(-ENOMEM);
45956+
45957+ memset(s_tmp->obj_hash, 0,
45958+ s_tmp->obj_hash_size *
45959+ sizeof (struct acl_object_label *));
45960+
45961+ /* add in objects */
45962+ err = copy_user_objs(ghash.first, s_tmp, role);
45963+
45964+ if (err)
45965+ return ERR_PTR(err);
45966+
45967+ /* set pointer for parent subject */
45968+ if (s_tmp->parent_subject) {
45969+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45970+
45971+ if (IS_ERR(s_tmp2))
45972+ return s_tmp2;
45973+
45974+ s_tmp->parent_subject = s_tmp2;
45975+ }
45976+
45977+ /* add in ip acls */
45978+
45979+ if (!s_tmp->ip_num) {
45980+ s_tmp->ips = NULL;
45981+ goto insert;
45982+ }
45983+
45984+ i_tmp =
45985+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45986+ sizeof (struct acl_ip_label *));
45987+
45988+ if (!i_tmp)
45989+ return ERR_PTR(-ENOMEM);
45990+
45991+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45992+ *(i_tmp + i_num) =
45993+ (struct acl_ip_label *)
45994+ acl_alloc(sizeof (struct acl_ip_label));
45995+ if (!*(i_tmp + i_num))
45996+ return ERR_PTR(-ENOMEM);
45997+
45998+ if (copy_from_user
45999+ (&i_utmp2, s_tmp->ips + i_num,
46000+ sizeof (struct acl_ip_label *)))
46001+ return ERR_PTR(-EFAULT);
46002+
46003+ if (copy_from_user
46004+ (*(i_tmp + i_num), i_utmp2,
46005+ sizeof (struct acl_ip_label)))
46006+ return ERR_PTR(-EFAULT);
46007+
46008+ if ((*(i_tmp + i_num))->iface == NULL)
46009+ continue;
46010+
46011+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
46012+ if (!len || len >= IFNAMSIZ)
46013+ return ERR_PTR(-EINVAL);
46014+ tmp = acl_alloc(len);
46015+ if (tmp == NULL)
46016+ return ERR_PTR(-ENOMEM);
46017+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
46018+ return ERR_PTR(-EFAULT);
46019+ (*(i_tmp + i_num))->iface = tmp;
46020+ }
46021+
46022+ s_tmp->ips = i_tmp;
46023+
46024+insert:
46025+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
46026+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
46027+ return ERR_PTR(-ENOMEM);
46028+
46029+ return s_tmp;
46030+}
46031+
46032+static int
46033+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
46034+{
46035+ struct acl_subject_label s_pre;
46036+ struct acl_subject_label * ret;
46037+ int err;
46038+
46039+ while (userp) {
46040+ if (copy_from_user(&s_pre, userp,
46041+ sizeof (struct acl_subject_label)))
46042+ return -EFAULT;
46043+
46044+ /* do not add nested subjects here, add
46045+ while parsing objects
46046+ */
46047+
46048+ if (s_pre.mode & GR_NESTED) {
46049+ userp = s_pre.prev;
46050+ continue;
46051+ }
46052+
46053+ ret = do_copy_user_subj(userp, role);
46054+
46055+ err = PTR_ERR(ret);
46056+ if (IS_ERR(ret))
46057+ return err;
46058+
46059+ insert_acl_subj_label(ret, role);
46060+
46061+ userp = s_pre.prev;
46062+ }
46063+
46064+ return 0;
46065+}
46066+
46067+static int
46068+copy_user_acl(struct gr_arg *arg)
46069+{
46070+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
46071+ struct sprole_pw *sptmp;
46072+ struct gr_hash_struct *ghash;
46073+ uid_t *domainlist;
46074+ unsigned int r_num;
46075+ unsigned int len;
46076+ char *tmp;
46077+ int err = 0;
46078+ __u16 i;
46079+ __u32 num_subjs;
46080+
46081+ /* we need a default and kernel role */
46082+ if (arg->role_db.num_roles < 2)
46083+ return -EINVAL;
46084+
46085+ /* copy special role authentication info from userspace */
46086+
46087+ num_sprole_pws = arg->num_sprole_pws;
46088+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
46089+
46090+ if (!acl_special_roles) {
46091+ err = -ENOMEM;
46092+ goto cleanup;
46093+ }
46094+
46095+ for (i = 0; i < num_sprole_pws; i++) {
46096+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
46097+ if (!sptmp) {
46098+ err = -ENOMEM;
46099+ goto cleanup;
46100+ }
46101+ if (copy_from_user(sptmp, arg->sprole_pws + i,
46102+ sizeof (struct sprole_pw))) {
46103+ err = -EFAULT;
46104+ goto cleanup;
46105+ }
46106+
46107+ len =
46108+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
46109+
46110+ if (!len || len >= GR_SPROLE_LEN) {
46111+ err = -EINVAL;
46112+ goto cleanup;
46113+ }
46114+
46115+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
46116+ err = -ENOMEM;
46117+ goto cleanup;
46118+ }
46119+
46120+ if (copy_from_user(tmp, sptmp->rolename, len)) {
46121+ err = -EFAULT;
46122+ goto cleanup;
46123+ }
46124+ tmp[len-1] = '\0';
46125+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46126+ printk(KERN_ALERT "Copying special role %s\n", tmp);
46127+#endif
46128+ sptmp->rolename = tmp;
46129+ acl_special_roles[i] = sptmp;
46130+ }
46131+
46132+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
46133+
46134+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
46135+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
46136+
46137+ if (!r_tmp) {
46138+ err = -ENOMEM;
46139+ goto cleanup;
46140+ }
46141+
46142+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
46143+ sizeof (struct acl_role_label *))) {
46144+ err = -EFAULT;
46145+ goto cleanup;
46146+ }
46147+
46148+ if (copy_from_user(r_tmp, r_utmp2,
46149+ sizeof (struct acl_role_label))) {
46150+ err = -EFAULT;
46151+ goto cleanup;
46152+ }
46153+
46154+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
46155+
46156+ if (!len || len >= PATH_MAX) {
46157+ err = -EINVAL;
46158+ goto cleanup;
46159+ }
46160+
46161+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
46162+ err = -ENOMEM;
46163+ goto cleanup;
46164+ }
46165+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
46166+ err = -EFAULT;
46167+ goto cleanup;
46168+ }
46169+ tmp[len-1] = '\0';
46170+ r_tmp->rolename = tmp;
46171+
46172+ if (!strcmp(r_tmp->rolename, "default")
46173+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
46174+ default_role = r_tmp;
46175+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
46176+ kernel_role = r_tmp;
46177+ }
46178+
46179+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
46180+ err = -ENOMEM;
46181+ goto cleanup;
46182+ }
46183+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
46184+ err = -EFAULT;
46185+ goto cleanup;
46186+ }
46187+
46188+ r_tmp->hash = ghash;
46189+
46190+ num_subjs = count_user_subjs(r_tmp->hash->first);
46191+
46192+ r_tmp->subj_hash_size = num_subjs;
46193+ r_tmp->subj_hash =
46194+ (struct acl_subject_label **)
46195+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
46196+
46197+ if (!r_tmp->subj_hash) {
46198+ err = -ENOMEM;
46199+ goto cleanup;
46200+ }
46201+
46202+ err = copy_user_allowedips(r_tmp);
46203+ if (err)
46204+ goto cleanup;
46205+
46206+ /* copy domain info */
46207+ if (r_tmp->domain_children != NULL) {
46208+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
46209+ if (domainlist == NULL) {
46210+ err = -ENOMEM;
46211+ goto cleanup;
46212+ }
46213+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
46214+ err = -EFAULT;
46215+ goto cleanup;
46216+ }
46217+ r_tmp->domain_children = domainlist;
46218+ }
46219+
46220+ err = copy_user_transitions(r_tmp);
46221+ if (err)
46222+ goto cleanup;
46223+
46224+ memset(r_tmp->subj_hash, 0,
46225+ r_tmp->subj_hash_size *
46226+ sizeof (struct acl_subject_label *));
46227+
46228+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
46229+
46230+ if (err)
46231+ goto cleanup;
46232+
46233+ /* set nested subject list to null */
46234+ r_tmp->hash->first = NULL;
46235+
46236+ insert_acl_role_label(r_tmp);
46237+ }
46238+
46239+ goto return_err;
46240+ cleanup:
46241+ free_variables();
46242+ return_err:
46243+ return err;
46244+
46245+}
46246+
46247+static int
46248+gracl_init(struct gr_arg *args)
46249+{
46250+ int error = 0;
46251+
46252+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
46253+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
46254+
46255+ if (init_variables(args)) {
46256+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
46257+ error = -ENOMEM;
46258+ free_variables();
46259+ goto out;
46260+ }
46261+
46262+ error = copy_user_acl(args);
46263+ free_init_variables();
46264+ if (error) {
46265+ free_variables();
46266+ goto out;
46267+ }
46268+
46269+ if ((error = gr_set_acls(0))) {
46270+ free_variables();
46271+ goto out;
46272+ }
46273+
46274+ pax_open_kernel();
46275+ gr_status |= GR_READY;
46276+ pax_close_kernel();
46277+
46278+ out:
46279+ return error;
46280+}
46281+
46282+/* derived from glibc fnmatch() 0: match, 1: no match*/
46283+
46284+static int
46285+glob_match(const char *p, const char *n)
46286+{
46287+ char c;
46288+
46289+ while ((c = *p++) != '\0') {
46290+ switch (c) {
46291+ case '?':
46292+ if (*n == '\0')
46293+ return 1;
46294+ else if (*n == '/')
46295+ return 1;
46296+ break;
46297+ case '\\':
46298+ if (*n != c)
46299+ return 1;
46300+ break;
46301+ case '*':
46302+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
46303+ if (*n == '/')
46304+ return 1;
46305+ else if (c == '?') {
46306+ if (*n == '\0')
46307+ return 1;
46308+ else
46309+ ++n;
46310+ }
46311+ }
46312+ if (c == '\0') {
46313+ return 0;
46314+ } else {
46315+ const char *endp;
46316+
46317+ if ((endp = strchr(n, '/')) == NULL)
46318+ endp = n + strlen(n);
46319+
46320+ if (c == '[') {
46321+ for (--p; n < endp; ++n)
46322+ if (!glob_match(p, n))
46323+ return 0;
46324+ } else if (c == '/') {
46325+ while (*n != '\0' && *n != '/')
46326+ ++n;
46327+ if (*n == '/' && !glob_match(p, n + 1))
46328+ return 0;
46329+ } else {
46330+ for (--p; n < endp; ++n)
46331+ if (*n == c && !glob_match(p, n))
46332+ return 0;
46333+ }
46334+
46335+ return 1;
46336+ }
46337+ case '[':
46338+ {
46339+ int not;
46340+ char cold;
46341+
46342+ if (*n == '\0' || *n == '/')
46343+ return 1;
46344+
46345+ not = (*p == '!' || *p == '^');
46346+ if (not)
46347+ ++p;
46348+
46349+ c = *p++;
46350+ for (;;) {
46351+ unsigned char fn = (unsigned char)*n;
46352+
46353+ if (c == '\0')
46354+ return 1;
46355+ else {
46356+ if (c == fn)
46357+ goto matched;
46358+ cold = c;
46359+ c = *p++;
46360+
46361+ if (c == '-' && *p != ']') {
46362+ unsigned char cend = *p++;
46363+
46364+ if (cend == '\0')
46365+ return 1;
46366+
46367+ if (cold <= fn && fn <= cend)
46368+ goto matched;
46369+
46370+ c = *p++;
46371+ }
46372+ }
46373+
46374+ if (c == ']')
46375+ break;
46376+ }
46377+ if (!not)
46378+ return 1;
46379+ break;
46380+ matched:
46381+ while (c != ']') {
46382+ if (c == '\0')
46383+ return 1;
46384+
46385+ c = *p++;
46386+ }
46387+ if (not)
46388+ return 1;
46389+ }
46390+ break;
46391+ default:
46392+ if (c != *n)
46393+ return 1;
46394+ }
46395+
46396+ ++n;
46397+ }
46398+
46399+ if (*n == '\0')
46400+ return 0;
46401+
46402+ if (*n == '/')
46403+ return 0;
46404+
46405+ return 1;
46406+}
46407+
46408+static struct acl_object_label *
46409+chk_glob_label(struct acl_object_label *globbed,
46410+ struct dentry *dentry, struct vfsmount *mnt, char **path)
46411+{
46412+ struct acl_object_label *tmp;
46413+
46414+ if (*path == NULL)
46415+ *path = gr_to_filename_nolock(dentry, mnt);
46416+
46417+ tmp = globbed;
46418+
46419+ while (tmp) {
46420+ if (!glob_match(tmp->filename, *path))
46421+ return tmp;
46422+ tmp = tmp->next;
46423+ }
46424+
46425+ return NULL;
46426+}
46427+
46428+static struct acl_object_label *
46429+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46430+ const ino_t curr_ino, const dev_t curr_dev,
46431+ const struct acl_subject_label *subj, char **path, const int checkglob)
46432+{
46433+ struct acl_subject_label *tmpsubj;
46434+ struct acl_object_label *retval;
46435+ struct acl_object_label *retval2;
46436+
46437+ tmpsubj = (struct acl_subject_label *) subj;
46438+ read_lock(&gr_inode_lock);
46439+ do {
46440+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
46441+ if (retval) {
46442+ if (checkglob && retval->globbed) {
46443+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
46444+ (struct vfsmount *)orig_mnt, path);
46445+ if (retval2)
46446+ retval = retval2;
46447+ }
46448+ break;
46449+ }
46450+ } while ((tmpsubj = tmpsubj->parent_subject));
46451+ read_unlock(&gr_inode_lock);
46452+
46453+ return retval;
46454+}
46455+
46456+static __inline__ struct acl_object_label *
46457+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46458+ struct dentry *curr_dentry,
46459+ const struct acl_subject_label *subj, char **path, const int checkglob)
46460+{
46461+ int newglob = checkglob;
46462+ ino_t inode;
46463+ dev_t device;
46464+
46465+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
46466+ as we don't want a / * rule to match instead of the / object
46467+ don't do this for create lookups that call this function though, since they're looking up
46468+ on the parent and thus need globbing checks on all paths
46469+ */
46470+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
46471+ newglob = GR_NO_GLOB;
46472+
46473+ spin_lock(&curr_dentry->d_lock);
46474+ inode = curr_dentry->d_inode->i_ino;
46475+ device = __get_dev(curr_dentry);
46476+ spin_unlock(&curr_dentry->d_lock);
46477+
46478+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
46479+}
46480+
46481+static struct acl_object_label *
46482+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46483+ const struct acl_subject_label *subj, char *path, const int checkglob)
46484+{
46485+ struct dentry *dentry = (struct dentry *) l_dentry;
46486+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46487+ struct acl_object_label *retval;
46488+ struct dentry *parent;
46489+
46490+ write_seqlock(&rename_lock);
46491+ br_read_lock(vfsmount_lock);
46492+
46493+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
46494+#ifdef CONFIG_NET
46495+ mnt == sock_mnt ||
46496+#endif
46497+#ifdef CONFIG_HUGETLBFS
46498+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
46499+#endif
46500+ /* ignore Eric Biederman */
46501+ IS_PRIVATE(l_dentry->d_inode))) {
46502+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
46503+ goto out;
46504+ }
46505+
46506+ for (;;) {
46507+ if (dentry == real_root.dentry && mnt == real_root.mnt)
46508+ break;
46509+
46510+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46511+ if (mnt->mnt_parent == mnt)
46512+ break;
46513+
46514+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46515+ if (retval != NULL)
46516+ goto out;
46517+
46518+ dentry = mnt->mnt_mountpoint;
46519+ mnt = mnt->mnt_parent;
46520+ continue;
46521+ }
46522+
46523+ parent = dentry->d_parent;
46524+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46525+ if (retval != NULL)
46526+ goto out;
46527+
46528+ dentry = parent;
46529+ }
46530+
46531+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46532+
46533+ /* real_root is pinned so we don't have to hold a reference */
46534+ if (retval == NULL)
46535+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
46536+out:
46537+ br_read_unlock(vfsmount_lock);
46538+ write_sequnlock(&rename_lock);
46539+
46540+ BUG_ON(retval == NULL);
46541+
46542+ return retval;
46543+}
46544+
46545+static __inline__ struct acl_object_label *
46546+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46547+ const struct acl_subject_label *subj)
46548+{
46549+ char *path = NULL;
46550+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46551+}
46552+
46553+static __inline__ struct acl_object_label *
46554+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46555+ const struct acl_subject_label *subj)
46556+{
46557+ char *path = NULL;
46558+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46559+}
46560+
46561+static __inline__ struct acl_object_label *
46562+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46563+ const struct acl_subject_label *subj, char *path)
46564+{
46565+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46566+}
46567+
46568+static struct acl_subject_label *
46569+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46570+ const struct acl_role_label *role)
46571+{
46572+ struct dentry *dentry = (struct dentry *) l_dentry;
46573+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46574+ struct acl_subject_label *retval;
46575+ struct dentry *parent;
46576+
46577+ write_seqlock(&rename_lock);
46578+ br_read_lock(vfsmount_lock);
46579+
46580+ for (;;) {
46581+ if (dentry == real_root.dentry && mnt == real_root.mnt)
46582+ break;
46583+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46584+ if (mnt->mnt_parent == mnt)
46585+ break;
46586+
46587+ spin_lock(&dentry->d_lock);
46588+ read_lock(&gr_inode_lock);
46589+ retval =
46590+ lookup_acl_subj_label(dentry->d_inode->i_ino,
46591+ __get_dev(dentry), role);
46592+ read_unlock(&gr_inode_lock);
46593+ spin_unlock(&dentry->d_lock);
46594+ if (retval != NULL)
46595+ goto out;
46596+
46597+ dentry = mnt->mnt_mountpoint;
46598+ mnt = mnt->mnt_parent;
46599+ continue;
46600+ }
46601+
46602+ spin_lock(&dentry->d_lock);
46603+ read_lock(&gr_inode_lock);
46604+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46605+ __get_dev(dentry), role);
46606+ read_unlock(&gr_inode_lock);
46607+ parent = dentry->d_parent;
46608+ spin_unlock(&dentry->d_lock);
46609+
46610+ if (retval != NULL)
46611+ goto out;
46612+
46613+ dentry = parent;
46614+ }
46615+
46616+ spin_lock(&dentry->d_lock);
46617+ read_lock(&gr_inode_lock);
46618+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46619+ __get_dev(dentry), role);
46620+ read_unlock(&gr_inode_lock);
46621+ spin_unlock(&dentry->d_lock);
46622+
46623+ if (unlikely(retval == NULL)) {
46624+ /* real_root is pinned, we don't need to hold a reference */
46625+ read_lock(&gr_inode_lock);
46626+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
46627+ __get_dev(real_root.dentry), role);
46628+ read_unlock(&gr_inode_lock);
46629+ }
46630+out:
46631+ br_read_unlock(vfsmount_lock);
46632+ write_sequnlock(&rename_lock);
46633+
46634+ BUG_ON(retval == NULL);
46635+
46636+ return retval;
46637+}
46638+
46639+static void
46640+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46641+{
46642+ struct task_struct *task = current;
46643+ const struct cred *cred = current_cred();
46644+
46645+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46646+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46647+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46648+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46649+
46650+ return;
46651+}
46652+
46653+static void
46654+gr_log_learn_sysctl(const char *path, const __u32 mode)
46655+{
46656+ struct task_struct *task = current;
46657+ const struct cred *cred = current_cred();
46658+
46659+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46660+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46661+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46662+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46663+
46664+ return;
46665+}
46666+
46667+static void
46668+gr_log_learn_id_change(const char type, const unsigned int real,
46669+ const unsigned int effective, const unsigned int fs)
46670+{
46671+ struct task_struct *task = current;
46672+ const struct cred *cred = current_cred();
46673+
46674+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46675+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46676+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46677+ type, real, effective, fs, &task->signal->saved_ip);
46678+
46679+ return;
46680+}
46681+
46682+__u32
46683+gr_check_link(const struct dentry * new_dentry,
46684+ const struct dentry * parent_dentry,
46685+ const struct vfsmount * parent_mnt,
46686+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46687+{
46688+ struct acl_object_label *obj;
46689+ __u32 oldmode, newmode;
46690+ __u32 needmode;
46691+
46692+ if (unlikely(!(gr_status & GR_READY)))
46693+ return (GR_CREATE | GR_LINK);
46694+
46695+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46696+ oldmode = obj->mode;
46697+
46698+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46699+ oldmode |= (GR_CREATE | GR_LINK);
46700+
46701+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46702+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46703+ needmode |= GR_SETID | GR_AUDIT_SETID;
46704+
46705+ newmode =
46706+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
46707+ oldmode | needmode);
46708+
46709+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46710+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46711+ GR_INHERIT | GR_AUDIT_INHERIT);
46712+
46713+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46714+ goto bad;
46715+
46716+ if ((oldmode & needmode) != needmode)
46717+ goto bad;
46718+
46719+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46720+ if ((newmode & needmode) != needmode)
46721+ goto bad;
46722+
46723+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46724+ return newmode;
46725+bad:
46726+ needmode = oldmode;
46727+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46728+ needmode |= GR_SETID;
46729+
46730+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46731+ gr_log_learn(old_dentry, old_mnt, needmode);
46732+ return (GR_CREATE | GR_LINK);
46733+ } else if (newmode & GR_SUPPRESS)
46734+ return GR_SUPPRESS;
46735+ else
46736+ return 0;
46737+}
46738+
46739+__u32
46740+gr_search_file(const struct dentry * dentry, const __u32 mode,
46741+ const struct vfsmount * mnt)
46742+{
46743+ __u32 retval = mode;
46744+ struct acl_subject_label *curracl;
46745+ struct acl_object_label *currobj;
46746+
46747+ if (unlikely(!(gr_status & GR_READY)))
46748+ return (mode & ~GR_AUDITS);
46749+
46750+ curracl = current->acl;
46751+
46752+ currobj = chk_obj_label(dentry, mnt, curracl);
46753+ retval = currobj->mode & mode;
46754+
46755+ /* if we're opening a specified transfer file for writing
46756+ (e.g. /dev/initctl), then transfer our role to init
46757+ */
46758+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46759+ current->role->roletype & GR_ROLE_PERSIST)) {
46760+ struct task_struct *task = init_pid_ns.child_reaper;
46761+
46762+ if (task->role != current->role) {
46763+ task->acl_sp_role = 0;
46764+ task->acl_role_id = current->acl_role_id;
46765+ task->role = current->role;
46766+ rcu_read_lock();
46767+ read_lock(&grsec_exec_file_lock);
46768+ gr_apply_subject_to_task(task);
46769+ read_unlock(&grsec_exec_file_lock);
46770+ rcu_read_unlock();
46771+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46772+ }
46773+ }
46774+
46775+ if (unlikely
46776+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46777+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46778+ __u32 new_mode = mode;
46779+
46780+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46781+
46782+ retval = new_mode;
46783+
46784+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46785+ new_mode |= GR_INHERIT;
46786+
46787+ if (!(mode & GR_NOLEARN))
46788+ gr_log_learn(dentry, mnt, new_mode);
46789+ }
46790+
46791+ return retval;
46792+}
46793+
46794+__u32
46795+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46796+ const struct vfsmount * mnt, const __u32 mode)
46797+{
46798+ struct name_entry *match;
46799+ struct acl_object_label *matchpo;
46800+ struct acl_subject_label *curracl;
46801+ char *path;
46802+ __u32 retval;
46803+
46804+ if (unlikely(!(gr_status & GR_READY)))
46805+ return (mode & ~GR_AUDITS);
46806+
46807+ preempt_disable();
46808+ path = gr_to_filename_rbac(new_dentry, mnt);
46809+ match = lookup_name_entry_create(path);
46810+
46811+ if (!match)
46812+ goto check_parent;
46813+
46814+ curracl = current->acl;
46815+
46816+ read_lock(&gr_inode_lock);
46817+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46818+ read_unlock(&gr_inode_lock);
46819+
46820+ if (matchpo) {
46821+ if ((matchpo->mode & mode) !=
46822+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
46823+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46824+ __u32 new_mode = mode;
46825+
46826+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46827+
46828+ gr_log_learn(new_dentry, mnt, new_mode);
46829+
46830+ preempt_enable();
46831+ return new_mode;
46832+ }
46833+ preempt_enable();
46834+ return (matchpo->mode & mode);
46835+ }
46836+
46837+ check_parent:
46838+ curracl = current->acl;
46839+
46840+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46841+ retval = matchpo->mode & mode;
46842+
46843+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46844+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46845+ __u32 new_mode = mode;
46846+
46847+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46848+
46849+ gr_log_learn(new_dentry, mnt, new_mode);
46850+ preempt_enable();
46851+ return new_mode;
46852+ }
46853+
46854+ preempt_enable();
46855+ return retval;
46856+}
46857+
46858+int
46859+gr_check_hidden_task(const struct task_struct *task)
46860+{
46861+ if (unlikely(!(gr_status & GR_READY)))
46862+ return 0;
46863+
46864+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46865+ return 1;
46866+
46867+ return 0;
46868+}
46869+
46870+int
46871+gr_check_protected_task(const struct task_struct *task)
46872+{
46873+ if (unlikely(!(gr_status & GR_READY) || !task))
46874+ return 0;
46875+
46876+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46877+ task->acl != current->acl)
46878+ return 1;
46879+
46880+ return 0;
46881+}
46882+
46883+int
46884+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46885+{
46886+ struct task_struct *p;
46887+ int ret = 0;
46888+
46889+ if (unlikely(!(gr_status & GR_READY) || !pid))
46890+ return ret;
46891+
46892+ read_lock(&tasklist_lock);
46893+ do_each_pid_task(pid, type, p) {
46894+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46895+ p->acl != current->acl) {
46896+ ret = 1;
46897+ goto out;
46898+ }
46899+ } while_each_pid_task(pid, type, p);
46900+out:
46901+ read_unlock(&tasklist_lock);
46902+
46903+ return ret;
46904+}
46905+
46906+void
46907+gr_copy_label(struct task_struct *tsk)
46908+{
46909+ tsk->signal->used_accept = 0;
46910+ tsk->acl_sp_role = 0;
46911+ tsk->acl_role_id = current->acl_role_id;
46912+ tsk->acl = current->acl;
46913+ tsk->role = current->role;
46914+ tsk->signal->curr_ip = current->signal->curr_ip;
46915+ tsk->signal->saved_ip = current->signal->saved_ip;
46916+ if (current->exec_file)
46917+ get_file(current->exec_file);
46918+ tsk->exec_file = current->exec_file;
46919+ tsk->is_writable = current->is_writable;
46920+ if (unlikely(current->signal->used_accept)) {
46921+ current->signal->curr_ip = 0;
46922+ current->signal->saved_ip = 0;
46923+ }
46924+
46925+ return;
46926+}
46927+
46928+static void
46929+gr_set_proc_res(struct task_struct *task)
46930+{
46931+ struct acl_subject_label *proc;
46932+ unsigned short i;
46933+
46934+ proc = task->acl;
46935+
46936+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46937+ return;
46938+
46939+ for (i = 0; i < RLIM_NLIMITS; i++) {
46940+ if (!(proc->resmask & (1 << i)))
46941+ continue;
46942+
46943+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46944+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46945+ }
46946+
46947+ return;
46948+}
46949+
46950+extern int __gr_process_user_ban(struct user_struct *user);
46951+
46952+int
46953+gr_check_user_change(int real, int effective, int fs)
46954+{
46955+ unsigned int i;
46956+ __u16 num;
46957+ uid_t *uidlist;
46958+ int curuid;
46959+ int realok = 0;
46960+ int effectiveok = 0;
46961+ int fsok = 0;
46962+
46963+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46964+ struct user_struct *user;
46965+
46966+ if (real == -1)
46967+ goto skipit;
46968+
46969+ user = find_user(real);
46970+ if (user == NULL)
46971+ goto skipit;
46972+
46973+ if (__gr_process_user_ban(user)) {
46974+ /* for find_user */
46975+ free_uid(user);
46976+ return 1;
46977+ }
46978+
46979+ /* for find_user */
46980+ free_uid(user);
46981+
46982+skipit:
46983+#endif
46984+
46985+ if (unlikely(!(gr_status & GR_READY)))
46986+ return 0;
46987+
46988+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46989+ gr_log_learn_id_change('u', real, effective, fs);
46990+
46991+ num = current->acl->user_trans_num;
46992+ uidlist = current->acl->user_transitions;
46993+
46994+ if (uidlist == NULL)
46995+ return 0;
46996+
46997+ if (real == -1)
46998+ realok = 1;
46999+ if (effective == -1)
47000+ effectiveok = 1;
47001+ if (fs == -1)
47002+ fsok = 1;
47003+
47004+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
47005+ for (i = 0; i < num; i++) {
47006+ curuid = (int)uidlist[i];
47007+ if (real == curuid)
47008+ realok = 1;
47009+ if (effective == curuid)
47010+ effectiveok = 1;
47011+ if (fs == curuid)
47012+ fsok = 1;
47013+ }
47014+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
47015+ for (i = 0; i < num; i++) {
47016+ curuid = (int)uidlist[i];
47017+ if (real == curuid)
47018+ break;
47019+ if (effective == curuid)
47020+ break;
47021+ if (fs == curuid)
47022+ break;
47023+ }
47024+ /* not in deny list */
47025+ if (i == num) {
47026+ realok = 1;
47027+ effectiveok = 1;
47028+ fsok = 1;
47029+ }
47030+ }
47031+
47032+ if (realok && effectiveok && fsok)
47033+ return 0;
47034+ else {
47035+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
47036+ return 1;
47037+ }
47038+}
47039+
47040+int
47041+gr_check_group_change(int real, int effective, int fs)
47042+{
47043+ unsigned int i;
47044+ __u16 num;
47045+ gid_t *gidlist;
47046+ int curgid;
47047+ int realok = 0;
47048+ int effectiveok = 0;
47049+ int fsok = 0;
47050+
47051+ if (unlikely(!(gr_status & GR_READY)))
47052+ return 0;
47053+
47054+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47055+ gr_log_learn_id_change('g', real, effective, fs);
47056+
47057+ num = current->acl->group_trans_num;
47058+ gidlist = current->acl->group_transitions;
47059+
47060+ if (gidlist == NULL)
47061+ return 0;
47062+
47063+ if (real == -1)
47064+ realok = 1;
47065+ if (effective == -1)
47066+ effectiveok = 1;
47067+ if (fs == -1)
47068+ fsok = 1;
47069+
47070+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
47071+ for (i = 0; i < num; i++) {
47072+ curgid = (int)gidlist[i];
47073+ if (real == curgid)
47074+ realok = 1;
47075+ if (effective == curgid)
47076+ effectiveok = 1;
47077+ if (fs == curgid)
47078+ fsok = 1;
47079+ }
47080+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
47081+ for (i = 0; i < num; i++) {
47082+ curgid = (int)gidlist[i];
47083+ if (real == curgid)
47084+ break;
47085+ if (effective == curgid)
47086+ break;
47087+ if (fs == curgid)
47088+ break;
47089+ }
47090+ /* not in deny list */
47091+ if (i == num) {
47092+ realok = 1;
47093+ effectiveok = 1;
47094+ fsok = 1;
47095+ }
47096+ }
47097+
47098+ if (realok && effectiveok && fsok)
47099+ return 0;
47100+ else {
47101+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
47102+ return 1;
47103+ }
47104+}
47105+
47106+void
47107+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
47108+{
47109+ struct acl_role_label *role = task->role;
47110+ struct acl_subject_label *subj = NULL;
47111+ struct acl_object_label *obj;
47112+ struct file *filp;
47113+
47114+ if (unlikely(!(gr_status & GR_READY)))
47115+ return;
47116+
47117+ filp = task->exec_file;
47118+
47119+ /* kernel process, we'll give them the kernel role */
47120+ if (unlikely(!filp)) {
47121+ task->role = kernel_role;
47122+ task->acl = kernel_role->root_label;
47123+ return;
47124+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
47125+ role = lookup_acl_role_label(task, uid, gid);
47126+
47127+ /* perform subject lookup in possibly new role
47128+ we can use this result below in the case where role == task->role
47129+ */
47130+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
47131+
47132+ /* if we changed uid/gid, but result in the same role
47133+ and are using inheritance, don't lose the inherited subject
47134+ if current subject is other than what normal lookup
47135+ would result in, we arrived via inheritance, don't
47136+ lose subject
47137+ */
47138+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
47139+ (subj == task->acl)))
47140+ task->acl = subj;
47141+
47142+ task->role = role;
47143+
47144+ task->is_writable = 0;
47145+
47146+ /* ignore additional mmap checks for processes that are writable
47147+ by the default ACL */
47148+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47149+ if (unlikely(obj->mode & GR_WRITE))
47150+ task->is_writable = 1;
47151+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47152+ if (unlikely(obj->mode & GR_WRITE))
47153+ task->is_writable = 1;
47154+
47155+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47156+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47157+#endif
47158+
47159+ gr_set_proc_res(task);
47160+
47161+ return;
47162+}
47163+
47164+int
47165+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47166+ const int unsafe_share)
47167+{
47168+ struct task_struct *task = current;
47169+ struct acl_subject_label *newacl;
47170+ struct acl_object_label *obj;
47171+ __u32 retmode;
47172+
47173+ if (unlikely(!(gr_status & GR_READY)))
47174+ return 0;
47175+
47176+ newacl = chk_subj_label(dentry, mnt, task->role);
47177+
47178+ task_lock(task);
47179+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
47180+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
47181+ !(task->role->roletype & GR_ROLE_GOD) &&
47182+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
47183+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
47184+ task_unlock(task);
47185+ if (unsafe_share)
47186+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
47187+ else
47188+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
47189+ return -EACCES;
47190+ }
47191+ task_unlock(task);
47192+
47193+ obj = chk_obj_label(dentry, mnt, task->acl);
47194+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
47195+
47196+ if (!(task->acl->mode & GR_INHERITLEARN) &&
47197+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
47198+ if (obj->nested)
47199+ task->acl = obj->nested;
47200+ else
47201+ task->acl = newacl;
47202+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
47203+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
47204+
47205+ task->is_writable = 0;
47206+
47207+ /* ignore additional mmap checks for processes that are writable
47208+ by the default ACL */
47209+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
47210+ if (unlikely(obj->mode & GR_WRITE))
47211+ task->is_writable = 1;
47212+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
47213+ if (unlikely(obj->mode & GR_WRITE))
47214+ task->is_writable = 1;
47215+
47216+ gr_set_proc_res(task);
47217+
47218+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47219+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47220+#endif
47221+ return 0;
47222+}
47223+
47224+/* always called with valid inodev ptr */
47225+static void
47226+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
47227+{
47228+ struct acl_object_label *matchpo;
47229+ struct acl_subject_label *matchps;
47230+ struct acl_subject_label *subj;
47231+ struct acl_role_label *role;
47232+ unsigned int x;
47233+
47234+ FOR_EACH_ROLE_START(role)
47235+ FOR_EACH_SUBJECT_START(role, subj, x)
47236+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
47237+ matchpo->mode |= GR_DELETED;
47238+ FOR_EACH_SUBJECT_END(subj,x)
47239+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47240+ if (subj->inode == ino && subj->device == dev)
47241+ subj->mode |= GR_DELETED;
47242+ FOR_EACH_NESTED_SUBJECT_END(subj)
47243+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
47244+ matchps->mode |= GR_DELETED;
47245+ FOR_EACH_ROLE_END(role)
47246+
47247+ inodev->nentry->deleted = 1;
47248+
47249+ return;
47250+}
47251+
47252+void
47253+gr_handle_delete(const ino_t ino, const dev_t dev)
47254+{
47255+ struct inodev_entry *inodev;
47256+
47257+ if (unlikely(!(gr_status & GR_READY)))
47258+ return;
47259+
47260+ write_lock(&gr_inode_lock);
47261+ inodev = lookup_inodev_entry(ino, dev);
47262+ if (inodev != NULL)
47263+ do_handle_delete(inodev, ino, dev);
47264+ write_unlock(&gr_inode_lock);
47265+
47266+ return;
47267+}
47268+
47269+static void
47270+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
47271+ const ino_t newinode, const dev_t newdevice,
47272+ struct acl_subject_label *subj)
47273+{
47274+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
47275+ struct acl_object_label *match;
47276+
47277+ match = subj->obj_hash[index];
47278+
47279+ while (match && (match->inode != oldinode ||
47280+ match->device != olddevice ||
47281+ !(match->mode & GR_DELETED)))
47282+ match = match->next;
47283+
47284+ if (match && (match->inode == oldinode)
47285+ && (match->device == olddevice)
47286+ && (match->mode & GR_DELETED)) {
47287+ if (match->prev == NULL) {
47288+ subj->obj_hash[index] = match->next;
47289+ if (match->next != NULL)
47290+ match->next->prev = NULL;
47291+ } else {
47292+ match->prev->next = match->next;
47293+ if (match->next != NULL)
47294+ match->next->prev = match->prev;
47295+ }
47296+ match->prev = NULL;
47297+ match->next = NULL;
47298+ match->inode = newinode;
47299+ match->device = newdevice;
47300+ match->mode &= ~GR_DELETED;
47301+
47302+ insert_acl_obj_label(match, subj);
47303+ }
47304+
47305+ return;
47306+}
47307+
47308+static void
47309+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
47310+ const ino_t newinode, const dev_t newdevice,
47311+ struct acl_role_label *role)
47312+{
47313+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
47314+ struct acl_subject_label *match;
47315+
47316+ match = role->subj_hash[index];
47317+
47318+ while (match && (match->inode != oldinode ||
47319+ match->device != olddevice ||
47320+ !(match->mode & GR_DELETED)))
47321+ match = match->next;
47322+
47323+ if (match && (match->inode == oldinode)
47324+ && (match->device == olddevice)
47325+ && (match->mode & GR_DELETED)) {
47326+ if (match->prev == NULL) {
47327+ role->subj_hash[index] = match->next;
47328+ if (match->next != NULL)
47329+ match->next->prev = NULL;
47330+ } else {
47331+ match->prev->next = match->next;
47332+ if (match->next != NULL)
47333+ match->next->prev = match->prev;
47334+ }
47335+ match->prev = NULL;
47336+ match->next = NULL;
47337+ match->inode = newinode;
47338+ match->device = newdevice;
47339+ match->mode &= ~GR_DELETED;
47340+
47341+ insert_acl_subj_label(match, role);
47342+ }
47343+
47344+ return;
47345+}
47346+
47347+static void
47348+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
47349+ const ino_t newinode, const dev_t newdevice)
47350+{
47351+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
47352+ struct inodev_entry *match;
47353+
47354+ match = inodev_set.i_hash[index];
47355+
47356+ while (match && (match->nentry->inode != oldinode ||
47357+ match->nentry->device != olddevice || !match->nentry->deleted))
47358+ match = match->next;
47359+
47360+ if (match && (match->nentry->inode == oldinode)
47361+ && (match->nentry->device == olddevice) &&
47362+ match->nentry->deleted) {
47363+ if (match->prev == NULL) {
47364+ inodev_set.i_hash[index] = match->next;
47365+ if (match->next != NULL)
47366+ match->next->prev = NULL;
47367+ } else {
47368+ match->prev->next = match->next;
47369+ if (match->next != NULL)
47370+ match->next->prev = match->prev;
47371+ }
47372+ match->prev = NULL;
47373+ match->next = NULL;
47374+ match->nentry->inode = newinode;
47375+ match->nentry->device = newdevice;
47376+ match->nentry->deleted = 0;
47377+
47378+ insert_inodev_entry(match);
47379+ }
47380+
47381+ return;
47382+}
47383+
47384+static void
47385+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
47386+ const struct vfsmount *mnt)
47387+{
47388+ struct acl_subject_label *subj;
47389+ struct acl_role_label *role;
47390+ unsigned int x;
47391+ ino_t ino = dentry->d_inode->i_ino;
47392+ dev_t dev = __get_dev(dentry);
47393+
47394+ FOR_EACH_ROLE_START(role)
47395+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
47396+
47397+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47398+ if ((subj->inode == ino) && (subj->device == dev)) {
47399+ subj->inode = ino;
47400+ subj->device = dev;
47401+ }
47402+ FOR_EACH_NESTED_SUBJECT_END(subj)
47403+ FOR_EACH_SUBJECT_START(role, subj, x)
47404+ update_acl_obj_label(matchn->inode, matchn->device,
47405+ ino, dev, subj);
47406+ FOR_EACH_SUBJECT_END(subj,x)
47407+ FOR_EACH_ROLE_END(role)
47408+
47409+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
47410+
47411+ return;
47412+}
47413+
47414+void
47415+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47416+{
47417+ struct name_entry *matchn;
47418+
47419+ if (unlikely(!(gr_status & GR_READY)))
47420+ return;
47421+
47422+ preempt_disable();
47423+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
47424+
47425+ if (unlikely((unsigned long)matchn)) {
47426+ write_lock(&gr_inode_lock);
47427+ do_handle_create(matchn, dentry, mnt);
47428+ write_unlock(&gr_inode_lock);
47429+ }
47430+ preempt_enable();
47431+
47432+ return;
47433+}
47434+
47435+void
47436+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47437+ struct dentry *old_dentry,
47438+ struct dentry *new_dentry,
47439+ struct vfsmount *mnt, const __u8 replace)
47440+{
47441+ struct name_entry *matchn;
47442+ struct inodev_entry *inodev;
47443+ ino_t old_ino = old_dentry->d_inode->i_ino;
47444+ dev_t old_dev = __get_dev(old_dentry);
47445+
47446+ /* vfs_rename swaps the name and parent link for old_dentry and
47447+ new_dentry
47448+ at this point, old_dentry has the new name, parent link, and inode
47449+ for the renamed file
47450+ if a file is being replaced by a rename, new_dentry has the inode
47451+ and name for the replaced file
47452+ */
47453+
47454+ if (unlikely(!(gr_status & GR_READY)))
47455+ return;
47456+
47457+ preempt_disable();
47458+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
47459+
47460+ /* we wouldn't have to check d_inode if it weren't for
47461+ NFS silly-renaming
47462+ */
47463+
47464+ write_lock(&gr_inode_lock);
47465+ if (unlikely(replace && new_dentry->d_inode)) {
47466+ ino_t new_ino = new_dentry->d_inode->i_ino;
47467+ dev_t new_dev = __get_dev(new_dentry);
47468+
47469+ inodev = lookup_inodev_entry(new_ino, new_dev);
47470+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
47471+ do_handle_delete(inodev, new_ino, new_dev);
47472+ }
47473+
47474+ inodev = lookup_inodev_entry(old_ino, old_dev);
47475+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
47476+ do_handle_delete(inodev, old_ino, old_dev);
47477+
47478+ if (unlikely((unsigned long)matchn))
47479+ do_handle_create(matchn, old_dentry, mnt);
47480+
47481+ write_unlock(&gr_inode_lock);
47482+ preempt_enable();
47483+
47484+ return;
47485+}
47486+
47487+static int
47488+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
47489+ unsigned char **sum)
47490+{
47491+ struct acl_role_label *r;
47492+ struct role_allowed_ip *ipp;
47493+ struct role_transition *trans;
47494+ unsigned int i;
47495+ int found = 0;
47496+ u32 curr_ip = current->signal->curr_ip;
47497+
47498+ current->signal->saved_ip = curr_ip;
47499+
47500+ /* check transition table */
47501+
47502+ for (trans = current->role->transitions; trans; trans = trans->next) {
47503+ if (!strcmp(rolename, trans->rolename)) {
47504+ found = 1;
47505+ break;
47506+ }
47507+ }
47508+
47509+ if (!found)
47510+ return 0;
47511+
47512+ /* handle special roles that do not require authentication
47513+ and check ip */
47514+
47515+ FOR_EACH_ROLE_START(r)
47516+ if (!strcmp(rolename, r->rolename) &&
47517+ (r->roletype & GR_ROLE_SPECIAL)) {
47518+ found = 0;
47519+ if (r->allowed_ips != NULL) {
47520+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
47521+ if ((ntohl(curr_ip) & ipp->netmask) ==
47522+ (ntohl(ipp->addr) & ipp->netmask))
47523+ found = 1;
47524+ }
47525+ } else
47526+ found = 2;
47527+ if (!found)
47528+ return 0;
47529+
47530+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
47531+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
47532+ *salt = NULL;
47533+ *sum = NULL;
47534+ return 1;
47535+ }
47536+ }
47537+ FOR_EACH_ROLE_END(r)
47538+
47539+ for (i = 0; i < num_sprole_pws; i++) {
47540+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
47541+ *salt = acl_special_roles[i]->salt;
47542+ *sum = acl_special_roles[i]->sum;
47543+ return 1;
47544+ }
47545+ }
47546+
47547+ return 0;
47548+}
47549+
47550+static void
47551+assign_special_role(char *rolename)
47552+{
47553+ struct acl_object_label *obj;
47554+ struct acl_role_label *r;
47555+ struct acl_role_label *assigned = NULL;
47556+ struct task_struct *tsk;
47557+ struct file *filp;
47558+
47559+ FOR_EACH_ROLE_START(r)
47560+ if (!strcmp(rolename, r->rolename) &&
47561+ (r->roletype & GR_ROLE_SPECIAL)) {
47562+ assigned = r;
47563+ break;
47564+ }
47565+ FOR_EACH_ROLE_END(r)
47566+
47567+ if (!assigned)
47568+ return;
47569+
47570+ read_lock(&tasklist_lock);
47571+ read_lock(&grsec_exec_file_lock);
47572+
47573+ tsk = current->real_parent;
47574+ if (tsk == NULL)
47575+ goto out_unlock;
47576+
47577+ filp = tsk->exec_file;
47578+ if (filp == NULL)
47579+ goto out_unlock;
47580+
47581+ tsk->is_writable = 0;
47582+
47583+ tsk->acl_sp_role = 1;
47584+ tsk->acl_role_id = ++acl_sp_role_value;
47585+ tsk->role = assigned;
47586+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47587+
47588+ /* ignore additional mmap checks for processes that are writable
47589+ by the default ACL */
47590+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47591+ if (unlikely(obj->mode & GR_WRITE))
47592+ tsk->is_writable = 1;
47593+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47594+ if (unlikely(obj->mode & GR_WRITE))
47595+ tsk->is_writable = 1;
47596+
47597+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47598+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47599+#endif
47600+
47601+out_unlock:
47602+ read_unlock(&grsec_exec_file_lock);
47603+ read_unlock(&tasklist_lock);
47604+ return;
47605+}
47606+
47607+int gr_check_secure_terminal(struct task_struct *task)
47608+{
47609+ struct task_struct *p, *p2, *p3;
47610+ struct files_struct *files;
47611+ struct fdtable *fdt;
47612+ struct file *our_file = NULL, *file;
47613+ int i;
47614+
47615+ if (task->signal->tty == NULL)
47616+ return 1;
47617+
47618+ files = get_files_struct(task);
47619+ if (files != NULL) {
47620+ rcu_read_lock();
47621+ fdt = files_fdtable(files);
47622+ for (i=0; i < fdt->max_fds; i++) {
47623+ file = fcheck_files(files, i);
47624+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47625+ get_file(file);
47626+ our_file = file;
47627+ }
47628+ }
47629+ rcu_read_unlock();
47630+ put_files_struct(files);
47631+ }
47632+
47633+ if (our_file == NULL)
47634+ return 1;
47635+
47636+ read_lock(&tasklist_lock);
47637+ do_each_thread(p2, p) {
47638+ files = get_files_struct(p);
47639+ if (files == NULL ||
47640+ (p->signal && p->signal->tty == task->signal->tty)) {
47641+ if (files != NULL)
47642+ put_files_struct(files);
47643+ continue;
47644+ }
47645+ rcu_read_lock();
47646+ fdt = files_fdtable(files);
47647+ for (i=0; i < fdt->max_fds; i++) {
47648+ file = fcheck_files(files, i);
47649+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47650+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47651+ p3 = task;
47652+ while (p3->pid > 0) {
47653+ if (p3 == p)
47654+ break;
47655+ p3 = p3->real_parent;
47656+ }
47657+ if (p3 == p)
47658+ break;
47659+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47660+ gr_handle_alertkill(p);
47661+ rcu_read_unlock();
47662+ put_files_struct(files);
47663+ read_unlock(&tasklist_lock);
47664+ fput(our_file);
47665+ return 0;
47666+ }
47667+ }
47668+ rcu_read_unlock();
47669+ put_files_struct(files);
47670+ } while_each_thread(p2, p);
47671+ read_unlock(&tasklist_lock);
47672+
47673+ fput(our_file);
47674+ return 1;
47675+}
47676+
47677+ssize_t
47678+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47679+{
47680+ struct gr_arg_wrapper uwrap;
47681+ unsigned char *sprole_salt = NULL;
47682+ unsigned char *sprole_sum = NULL;
47683+ int error = sizeof (struct gr_arg_wrapper);
47684+ int error2 = 0;
47685+
47686+ mutex_lock(&gr_dev_mutex);
47687+
47688+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47689+ error = -EPERM;
47690+ goto out;
47691+ }
47692+
47693+ if (count != sizeof (struct gr_arg_wrapper)) {
47694+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47695+ error = -EINVAL;
47696+ goto out;
47697+ }
47698+
47699+
47700+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47701+ gr_auth_expires = 0;
47702+ gr_auth_attempts = 0;
47703+ }
47704+
47705+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47706+ error = -EFAULT;
47707+ goto out;
47708+ }
47709+
47710+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47711+ error = -EINVAL;
47712+ goto out;
47713+ }
47714+
47715+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47716+ error = -EFAULT;
47717+ goto out;
47718+ }
47719+
47720+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47721+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47722+ time_after(gr_auth_expires, get_seconds())) {
47723+ error = -EBUSY;
47724+ goto out;
47725+ }
47726+
47727+ /* if non-root trying to do anything other than use a special role,
47728+ do not attempt authentication, do not count towards authentication
47729+ locking
47730+ */
47731+
47732+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47733+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47734+ current_uid()) {
47735+ error = -EPERM;
47736+ goto out;
47737+ }
47738+
47739+ /* ensure pw and special role name are null terminated */
47740+
47741+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47742+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47743+
47744+ /* Okay.
47745+ * We have our enough of the argument structure..(we have yet
47746+ * to copy_from_user the tables themselves) . Copy the tables
47747+ * only if we need them, i.e. for loading operations. */
47748+
47749+ switch (gr_usermode->mode) {
47750+ case GR_STATUS:
47751+ if (gr_status & GR_READY) {
47752+ error = 1;
47753+ if (!gr_check_secure_terminal(current))
47754+ error = 3;
47755+ } else
47756+ error = 2;
47757+ goto out;
47758+ case GR_SHUTDOWN:
47759+ if ((gr_status & GR_READY)
47760+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47761+ pax_open_kernel();
47762+ gr_status &= ~GR_READY;
47763+ pax_close_kernel();
47764+
47765+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47766+ free_variables();
47767+ memset(gr_usermode, 0, sizeof (struct gr_arg));
47768+ memset(gr_system_salt, 0, GR_SALT_LEN);
47769+ memset(gr_system_sum, 0, GR_SHA_LEN);
47770+ } else if (gr_status & GR_READY) {
47771+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47772+ error = -EPERM;
47773+ } else {
47774+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47775+ error = -EAGAIN;
47776+ }
47777+ break;
47778+ case GR_ENABLE:
47779+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47780+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47781+ else {
47782+ if (gr_status & GR_READY)
47783+ error = -EAGAIN;
47784+ else
47785+ error = error2;
47786+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47787+ }
47788+ break;
47789+ case GR_RELOAD:
47790+ if (!(gr_status & GR_READY)) {
47791+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47792+ error = -EAGAIN;
47793+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47794+ preempt_disable();
47795+
47796+ pax_open_kernel();
47797+ gr_status &= ~GR_READY;
47798+ pax_close_kernel();
47799+
47800+ free_variables();
47801+ if (!(error2 = gracl_init(gr_usermode))) {
47802+ preempt_enable();
47803+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47804+ } else {
47805+ preempt_enable();
47806+ error = error2;
47807+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47808+ }
47809+ } else {
47810+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47811+ error = -EPERM;
47812+ }
47813+ break;
47814+ case GR_SEGVMOD:
47815+ if (unlikely(!(gr_status & GR_READY))) {
47816+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47817+ error = -EAGAIN;
47818+ break;
47819+ }
47820+
47821+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47822+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47823+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47824+ struct acl_subject_label *segvacl;
47825+ segvacl =
47826+ lookup_acl_subj_label(gr_usermode->segv_inode,
47827+ gr_usermode->segv_device,
47828+ current->role);
47829+ if (segvacl) {
47830+ segvacl->crashes = 0;
47831+ segvacl->expires = 0;
47832+ }
47833+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47834+ gr_remove_uid(gr_usermode->segv_uid);
47835+ }
47836+ } else {
47837+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47838+ error = -EPERM;
47839+ }
47840+ break;
47841+ case GR_SPROLE:
47842+ case GR_SPROLEPAM:
47843+ if (unlikely(!(gr_status & GR_READY))) {
47844+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47845+ error = -EAGAIN;
47846+ break;
47847+ }
47848+
47849+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47850+ current->role->expires = 0;
47851+ current->role->auth_attempts = 0;
47852+ }
47853+
47854+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47855+ time_after(current->role->expires, get_seconds())) {
47856+ error = -EBUSY;
47857+ goto out;
47858+ }
47859+
47860+ if (lookup_special_role_auth
47861+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47862+ && ((!sprole_salt && !sprole_sum)
47863+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47864+ char *p = "";
47865+ assign_special_role(gr_usermode->sp_role);
47866+ read_lock(&tasklist_lock);
47867+ if (current->real_parent)
47868+ p = current->real_parent->role->rolename;
47869+ read_unlock(&tasklist_lock);
47870+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47871+ p, acl_sp_role_value);
47872+ } else {
47873+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47874+ error = -EPERM;
47875+ if(!(current->role->auth_attempts++))
47876+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47877+
47878+ goto out;
47879+ }
47880+ break;
47881+ case GR_UNSPROLE:
47882+ if (unlikely(!(gr_status & GR_READY))) {
47883+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47884+ error = -EAGAIN;
47885+ break;
47886+ }
47887+
47888+ if (current->role->roletype & GR_ROLE_SPECIAL) {
47889+ char *p = "";
47890+ int i = 0;
47891+
47892+ read_lock(&tasklist_lock);
47893+ if (current->real_parent) {
47894+ p = current->real_parent->role->rolename;
47895+ i = current->real_parent->acl_role_id;
47896+ }
47897+ read_unlock(&tasklist_lock);
47898+
47899+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47900+ gr_set_acls(1);
47901+ } else {
47902+ error = -EPERM;
47903+ goto out;
47904+ }
47905+ break;
47906+ default:
47907+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47908+ error = -EINVAL;
47909+ break;
47910+ }
47911+
47912+ if (error != -EPERM)
47913+ goto out;
47914+
47915+ if(!(gr_auth_attempts++))
47916+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47917+
47918+ out:
47919+ mutex_unlock(&gr_dev_mutex);
47920+ return error;
47921+}
47922+
47923+/* must be called with
47924+ rcu_read_lock();
47925+ read_lock(&tasklist_lock);
47926+ read_lock(&grsec_exec_file_lock);
47927+*/
47928+int gr_apply_subject_to_task(struct task_struct *task)
47929+{
47930+ struct acl_object_label *obj;
47931+ char *tmpname;
47932+ struct acl_subject_label *tmpsubj;
47933+ struct file *filp;
47934+ struct name_entry *nmatch;
47935+
47936+ filp = task->exec_file;
47937+ if (filp == NULL)
47938+ return 0;
47939+
47940+ /* the following is to apply the correct subject
47941+ on binaries running when the RBAC system
47942+ is enabled, when the binaries have been
47943+ replaced or deleted since their execution
47944+ -----
47945+ when the RBAC system starts, the inode/dev
47946+ from exec_file will be one the RBAC system
47947+ is unaware of. It only knows the inode/dev
47948+ of the present file on disk, or the absence
47949+ of it.
47950+ */
47951+ preempt_disable();
47952+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47953+
47954+ nmatch = lookup_name_entry(tmpname);
47955+ preempt_enable();
47956+ tmpsubj = NULL;
47957+ if (nmatch) {
47958+ if (nmatch->deleted)
47959+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47960+ else
47961+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47962+ if (tmpsubj != NULL)
47963+ task->acl = tmpsubj;
47964+ }
47965+ if (tmpsubj == NULL)
47966+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47967+ task->role);
47968+ if (task->acl) {
47969+ task->is_writable = 0;
47970+ /* ignore additional mmap checks for processes that are writable
47971+ by the default ACL */
47972+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47973+ if (unlikely(obj->mode & GR_WRITE))
47974+ task->is_writable = 1;
47975+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47976+ if (unlikely(obj->mode & GR_WRITE))
47977+ task->is_writable = 1;
47978+
47979+ gr_set_proc_res(task);
47980+
47981+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47982+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47983+#endif
47984+ } else {
47985+ return 1;
47986+ }
47987+
47988+ return 0;
47989+}
47990+
47991+int
47992+gr_set_acls(const int type)
47993+{
47994+ struct task_struct *task, *task2;
47995+ struct acl_role_label *role = current->role;
47996+ __u16 acl_role_id = current->acl_role_id;
47997+ const struct cred *cred;
47998+ int ret;
47999+
48000+ rcu_read_lock();
48001+ read_lock(&tasklist_lock);
48002+ read_lock(&grsec_exec_file_lock);
48003+ do_each_thread(task2, task) {
48004+ /* check to see if we're called from the exit handler,
48005+ if so, only replace ACLs that have inherited the admin
48006+ ACL */
48007+
48008+ if (type && (task->role != role ||
48009+ task->acl_role_id != acl_role_id))
48010+ continue;
48011+
48012+ task->acl_role_id = 0;
48013+ task->acl_sp_role = 0;
48014+
48015+ if (task->exec_file) {
48016+ cred = __task_cred(task);
48017+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
48018+ ret = gr_apply_subject_to_task(task);
48019+ if (ret) {
48020+ read_unlock(&grsec_exec_file_lock);
48021+ read_unlock(&tasklist_lock);
48022+ rcu_read_unlock();
48023+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
48024+ return ret;
48025+ }
48026+ } else {
48027+ // it's a kernel process
48028+ task->role = kernel_role;
48029+ task->acl = kernel_role->root_label;
48030+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
48031+ task->acl->mode &= ~GR_PROCFIND;
48032+#endif
48033+ }
48034+ } while_each_thread(task2, task);
48035+ read_unlock(&grsec_exec_file_lock);
48036+ read_unlock(&tasklist_lock);
48037+ rcu_read_unlock();
48038+
48039+ return 0;
48040+}
48041+
48042+void
48043+gr_learn_resource(const struct task_struct *task,
48044+ const int res, const unsigned long wanted, const int gt)
48045+{
48046+ struct acl_subject_label *acl;
48047+ const struct cred *cred;
48048+
48049+ if (unlikely((gr_status & GR_READY) &&
48050+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
48051+ goto skip_reslog;
48052+
48053+#ifdef CONFIG_GRKERNSEC_RESLOG
48054+ gr_log_resource(task, res, wanted, gt);
48055+#endif
48056+ skip_reslog:
48057+
48058+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
48059+ return;
48060+
48061+ acl = task->acl;
48062+
48063+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
48064+ !(acl->resmask & (1 << (unsigned short) res))))
48065+ return;
48066+
48067+ if (wanted >= acl->res[res].rlim_cur) {
48068+ unsigned long res_add;
48069+
48070+ res_add = wanted;
48071+ switch (res) {
48072+ case RLIMIT_CPU:
48073+ res_add += GR_RLIM_CPU_BUMP;
48074+ break;
48075+ case RLIMIT_FSIZE:
48076+ res_add += GR_RLIM_FSIZE_BUMP;
48077+ break;
48078+ case RLIMIT_DATA:
48079+ res_add += GR_RLIM_DATA_BUMP;
48080+ break;
48081+ case RLIMIT_STACK:
48082+ res_add += GR_RLIM_STACK_BUMP;
48083+ break;
48084+ case RLIMIT_CORE:
48085+ res_add += GR_RLIM_CORE_BUMP;
48086+ break;
48087+ case RLIMIT_RSS:
48088+ res_add += GR_RLIM_RSS_BUMP;
48089+ break;
48090+ case RLIMIT_NPROC:
48091+ res_add += GR_RLIM_NPROC_BUMP;
48092+ break;
48093+ case RLIMIT_NOFILE:
48094+ res_add += GR_RLIM_NOFILE_BUMP;
48095+ break;
48096+ case RLIMIT_MEMLOCK:
48097+ res_add += GR_RLIM_MEMLOCK_BUMP;
48098+ break;
48099+ case RLIMIT_AS:
48100+ res_add += GR_RLIM_AS_BUMP;
48101+ break;
48102+ case RLIMIT_LOCKS:
48103+ res_add += GR_RLIM_LOCKS_BUMP;
48104+ break;
48105+ case RLIMIT_SIGPENDING:
48106+ res_add += GR_RLIM_SIGPENDING_BUMP;
48107+ break;
48108+ case RLIMIT_MSGQUEUE:
48109+ res_add += GR_RLIM_MSGQUEUE_BUMP;
48110+ break;
48111+ case RLIMIT_NICE:
48112+ res_add += GR_RLIM_NICE_BUMP;
48113+ break;
48114+ case RLIMIT_RTPRIO:
48115+ res_add += GR_RLIM_RTPRIO_BUMP;
48116+ break;
48117+ case RLIMIT_RTTIME:
48118+ res_add += GR_RLIM_RTTIME_BUMP;
48119+ break;
48120+ }
48121+
48122+ acl->res[res].rlim_cur = res_add;
48123+
48124+ if (wanted > acl->res[res].rlim_max)
48125+ acl->res[res].rlim_max = res_add;
48126+
48127+ /* only log the subject filename, since resource logging is supported for
48128+ single-subject learning only */
48129+ rcu_read_lock();
48130+ cred = __task_cred(task);
48131+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48132+ task->role->roletype, cred->uid, cred->gid, acl->filename,
48133+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
48134+ "", (unsigned long) res, &task->signal->saved_ip);
48135+ rcu_read_unlock();
48136+ }
48137+
48138+ return;
48139+}
48140+
48141+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
48142+void
48143+pax_set_initial_flags(struct linux_binprm *bprm)
48144+{
48145+ struct task_struct *task = current;
48146+ struct acl_subject_label *proc;
48147+ unsigned long flags;
48148+
48149+ if (unlikely(!(gr_status & GR_READY)))
48150+ return;
48151+
48152+ flags = pax_get_flags(task);
48153+
48154+ proc = task->acl;
48155+
48156+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
48157+ flags &= ~MF_PAX_PAGEEXEC;
48158+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
48159+ flags &= ~MF_PAX_SEGMEXEC;
48160+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
48161+ flags &= ~MF_PAX_RANDMMAP;
48162+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
48163+ flags &= ~MF_PAX_EMUTRAMP;
48164+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
48165+ flags &= ~MF_PAX_MPROTECT;
48166+
48167+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
48168+ flags |= MF_PAX_PAGEEXEC;
48169+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
48170+ flags |= MF_PAX_SEGMEXEC;
48171+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
48172+ flags |= MF_PAX_RANDMMAP;
48173+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
48174+ flags |= MF_PAX_EMUTRAMP;
48175+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
48176+ flags |= MF_PAX_MPROTECT;
48177+
48178+ pax_set_flags(task, flags);
48179+
48180+ return;
48181+}
48182+#endif
48183+
48184+#ifdef CONFIG_SYSCTL
48185+/* Eric Biederman likes breaking userland ABI and every inode-based security
48186+ system to save 35kb of memory */
48187+
48188+/* we modify the passed in filename, but adjust it back before returning */
48189+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
48190+{
48191+ struct name_entry *nmatch;
48192+ char *p, *lastp = NULL;
48193+ struct acl_object_label *obj = NULL, *tmp;
48194+ struct acl_subject_label *tmpsubj;
48195+ char c = '\0';
48196+
48197+ read_lock(&gr_inode_lock);
48198+
48199+ p = name + len - 1;
48200+ do {
48201+ nmatch = lookup_name_entry(name);
48202+ if (lastp != NULL)
48203+ *lastp = c;
48204+
48205+ if (nmatch == NULL)
48206+ goto next_component;
48207+ tmpsubj = current->acl;
48208+ do {
48209+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
48210+ if (obj != NULL) {
48211+ tmp = obj->globbed;
48212+ while (tmp) {
48213+ if (!glob_match(tmp->filename, name)) {
48214+ obj = tmp;
48215+ goto found_obj;
48216+ }
48217+ tmp = tmp->next;
48218+ }
48219+ goto found_obj;
48220+ }
48221+ } while ((tmpsubj = tmpsubj->parent_subject));
48222+next_component:
48223+ /* end case */
48224+ if (p == name)
48225+ break;
48226+
48227+ while (*p != '/')
48228+ p--;
48229+ if (p == name)
48230+ lastp = p + 1;
48231+ else {
48232+ lastp = p;
48233+ p--;
48234+ }
48235+ c = *lastp;
48236+ *lastp = '\0';
48237+ } while (1);
48238+found_obj:
48239+ read_unlock(&gr_inode_lock);
48240+ /* obj returned will always be non-null */
48241+ return obj;
48242+}
48243+
48244+/* returns 0 when allowing, non-zero on error
48245+ op of 0 is used for readdir, so we don't log the names of hidden files
48246+*/
48247+__u32
48248+gr_handle_sysctl(const struct ctl_table *table, const int op)
48249+{
48250+ struct ctl_table *tmp;
48251+ const char *proc_sys = "/proc/sys";
48252+ char *path;
48253+ struct acl_object_label *obj;
48254+ unsigned short len = 0, pos = 0, depth = 0, i;
48255+ __u32 err = 0;
48256+ __u32 mode = 0;
48257+
48258+ if (unlikely(!(gr_status & GR_READY)))
48259+ return 0;
48260+
48261+ /* for now, ignore operations on non-sysctl entries if it's not a
48262+ readdir*/
48263+ if (table->child != NULL && op != 0)
48264+ return 0;
48265+
48266+ mode |= GR_FIND;
48267+ /* it's only a read if it's an entry, read on dirs is for readdir */
48268+ if (op & MAY_READ)
48269+ mode |= GR_READ;
48270+ if (op & MAY_WRITE)
48271+ mode |= GR_WRITE;
48272+
48273+ preempt_disable();
48274+
48275+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48276+
48277+ /* it's only a read/write if it's an actual entry, not a dir
48278+ (which are opened for readdir)
48279+ */
48280+
48281+ /* convert the requested sysctl entry into a pathname */
48282+
48283+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48284+ len += strlen(tmp->procname);
48285+ len++;
48286+ depth++;
48287+ }
48288+
48289+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
48290+ /* deny */
48291+ goto out;
48292+ }
48293+
48294+ memset(path, 0, PAGE_SIZE);
48295+
48296+ memcpy(path, proc_sys, strlen(proc_sys));
48297+
48298+ pos += strlen(proc_sys);
48299+
48300+ for (; depth > 0; depth--) {
48301+ path[pos] = '/';
48302+ pos++;
48303+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48304+ if (depth == i) {
48305+ memcpy(path + pos, tmp->procname,
48306+ strlen(tmp->procname));
48307+ pos += strlen(tmp->procname);
48308+ }
48309+ i++;
48310+ }
48311+ }
48312+
48313+ obj = gr_lookup_by_name(path, pos);
48314+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
48315+
48316+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
48317+ ((err & mode) != mode))) {
48318+ __u32 new_mode = mode;
48319+
48320+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48321+
48322+ err = 0;
48323+ gr_log_learn_sysctl(path, new_mode);
48324+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
48325+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
48326+ err = -ENOENT;
48327+ } else if (!(err & GR_FIND)) {
48328+ err = -ENOENT;
48329+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
48330+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
48331+ path, (mode & GR_READ) ? " reading" : "",
48332+ (mode & GR_WRITE) ? " writing" : "");
48333+ err = -EACCES;
48334+ } else if ((err & mode) != mode) {
48335+ err = -EACCES;
48336+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
48337+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
48338+ path, (mode & GR_READ) ? " reading" : "",
48339+ (mode & GR_WRITE) ? " writing" : "");
48340+ err = 0;
48341+ } else
48342+ err = 0;
48343+
48344+ out:
48345+ preempt_enable();
48346+
48347+ return err;
48348+}
48349+#endif
48350+
48351+int
48352+gr_handle_proc_ptrace(struct task_struct *task)
48353+{
48354+ struct file *filp;
48355+ struct task_struct *tmp = task;
48356+ struct task_struct *curtemp = current;
48357+ __u32 retmode;
48358+
48359+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48360+ if (unlikely(!(gr_status & GR_READY)))
48361+ return 0;
48362+#endif
48363+
48364+ read_lock(&tasklist_lock);
48365+ read_lock(&grsec_exec_file_lock);
48366+ filp = task->exec_file;
48367+
48368+ while (tmp->pid > 0) {
48369+ if (tmp == curtemp)
48370+ break;
48371+ tmp = tmp->real_parent;
48372+ }
48373+
48374+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48375+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
48376+ read_unlock(&grsec_exec_file_lock);
48377+ read_unlock(&tasklist_lock);
48378+ return 1;
48379+ }
48380+
48381+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48382+ if (!(gr_status & GR_READY)) {
48383+ read_unlock(&grsec_exec_file_lock);
48384+ read_unlock(&tasklist_lock);
48385+ return 0;
48386+ }
48387+#endif
48388+
48389+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
48390+ read_unlock(&grsec_exec_file_lock);
48391+ read_unlock(&tasklist_lock);
48392+
48393+ if (retmode & GR_NOPTRACE)
48394+ return 1;
48395+
48396+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
48397+ && (current->acl != task->acl || (current->acl != current->role->root_label
48398+ && current->pid != task->pid)))
48399+ return 1;
48400+
48401+ return 0;
48402+}
48403+
48404+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
48405+{
48406+ if (unlikely(!(gr_status & GR_READY)))
48407+ return;
48408+
48409+ if (!(current->role->roletype & GR_ROLE_GOD))
48410+ return;
48411+
48412+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
48413+ p->role->rolename, gr_task_roletype_to_char(p),
48414+ p->acl->filename);
48415+}
48416+
48417+int
48418+gr_handle_ptrace(struct task_struct *task, const long request)
48419+{
48420+ struct task_struct *tmp = task;
48421+ struct task_struct *curtemp = current;
48422+ __u32 retmode;
48423+
48424+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48425+ if (unlikely(!(gr_status & GR_READY)))
48426+ return 0;
48427+#endif
48428+
48429+ read_lock(&tasklist_lock);
48430+ while (tmp->pid > 0) {
48431+ if (tmp == curtemp)
48432+ break;
48433+ tmp = tmp->real_parent;
48434+ }
48435+
48436+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48437+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
48438+ read_unlock(&tasklist_lock);
48439+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48440+ return 1;
48441+ }
48442+ read_unlock(&tasklist_lock);
48443+
48444+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48445+ if (!(gr_status & GR_READY))
48446+ return 0;
48447+#endif
48448+
48449+ read_lock(&grsec_exec_file_lock);
48450+ if (unlikely(!task->exec_file)) {
48451+ read_unlock(&grsec_exec_file_lock);
48452+ return 0;
48453+ }
48454+
48455+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
48456+ read_unlock(&grsec_exec_file_lock);
48457+
48458+ if (retmode & GR_NOPTRACE) {
48459+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48460+ return 1;
48461+ }
48462+
48463+ if (retmode & GR_PTRACERD) {
48464+ switch (request) {
48465+ case PTRACE_POKETEXT:
48466+ case PTRACE_POKEDATA:
48467+ case PTRACE_POKEUSR:
48468+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
48469+ case PTRACE_SETREGS:
48470+ case PTRACE_SETFPREGS:
48471+#endif
48472+#ifdef CONFIG_X86
48473+ case PTRACE_SETFPXREGS:
48474+#endif
48475+#ifdef CONFIG_ALTIVEC
48476+ case PTRACE_SETVRREGS:
48477+#endif
48478+ return 1;
48479+ default:
48480+ return 0;
48481+ }
48482+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
48483+ !(current->role->roletype & GR_ROLE_GOD) &&
48484+ (current->acl != task->acl)) {
48485+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48486+ return 1;
48487+ }
48488+
48489+ return 0;
48490+}
48491+
48492+static int is_writable_mmap(const struct file *filp)
48493+{
48494+ struct task_struct *task = current;
48495+ struct acl_object_label *obj, *obj2;
48496+
48497+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
48498+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
48499+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48500+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
48501+ task->role->root_label);
48502+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
48503+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
48504+ return 1;
48505+ }
48506+ }
48507+ return 0;
48508+}
48509+
48510+int
48511+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
48512+{
48513+ __u32 mode;
48514+
48515+ if (unlikely(!file || !(prot & PROT_EXEC)))
48516+ return 1;
48517+
48518+ if (is_writable_mmap(file))
48519+ return 0;
48520+
48521+ mode =
48522+ gr_search_file(file->f_path.dentry,
48523+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48524+ file->f_path.mnt);
48525+
48526+ if (!gr_tpe_allow(file))
48527+ return 0;
48528+
48529+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48530+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48531+ return 0;
48532+ } else if (unlikely(!(mode & GR_EXEC))) {
48533+ return 0;
48534+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48535+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48536+ return 1;
48537+ }
48538+
48539+ return 1;
48540+}
48541+
48542+int
48543+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48544+{
48545+ __u32 mode;
48546+
48547+ if (unlikely(!file || !(prot & PROT_EXEC)))
48548+ return 1;
48549+
48550+ if (is_writable_mmap(file))
48551+ return 0;
48552+
48553+ mode =
48554+ gr_search_file(file->f_path.dentry,
48555+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48556+ file->f_path.mnt);
48557+
48558+ if (!gr_tpe_allow(file))
48559+ return 0;
48560+
48561+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48562+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48563+ return 0;
48564+ } else if (unlikely(!(mode & GR_EXEC))) {
48565+ return 0;
48566+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48567+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48568+ return 1;
48569+ }
48570+
48571+ return 1;
48572+}
48573+
48574+void
48575+gr_acl_handle_psacct(struct task_struct *task, const long code)
48576+{
48577+ unsigned long runtime;
48578+ unsigned long cputime;
48579+ unsigned int wday, cday;
48580+ __u8 whr, chr;
48581+ __u8 wmin, cmin;
48582+ __u8 wsec, csec;
48583+ struct timespec timeval;
48584+
48585+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48586+ !(task->acl->mode & GR_PROCACCT)))
48587+ return;
48588+
48589+ do_posix_clock_monotonic_gettime(&timeval);
48590+ runtime = timeval.tv_sec - task->start_time.tv_sec;
48591+ wday = runtime / (3600 * 24);
48592+ runtime -= wday * (3600 * 24);
48593+ whr = runtime / 3600;
48594+ runtime -= whr * 3600;
48595+ wmin = runtime / 60;
48596+ runtime -= wmin * 60;
48597+ wsec = runtime;
48598+
48599+ cputime = (task->utime + task->stime) / HZ;
48600+ cday = cputime / (3600 * 24);
48601+ cputime -= cday * (3600 * 24);
48602+ chr = cputime / 3600;
48603+ cputime -= chr * 3600;
48604+ cmin = cputime / 60;
48605+ cputime -= cmin * 60;
48606+ csec = cputime;
48607+
48608+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48609+
48610+ return;
48611+}
48612+
48613+void gr_set_kernel_label(struct task_struct *task)
48614+{
48615+ if (gr_status & GR_READY) {
48616+ task->role = kernel_role;
48617+ task->acl = kernel_role->root_label;
48618+ }
48619+ return;
48620+}
48621+
48622+#ifdef CONFIG_TASKSTATS
48623+int gr_is_taskstats_denied(int pid)
48624+{
48625+ struct task_struct *task;
48626+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48627+ const struct cred *cred;
48628+#endif
48629+ int ret = 0;
48630+
48631+ /* restrict taskstats viewing to un-chrooted root users
48632+ who have the 'view' subject flag if the RBAC system is enabled
48633+ */
48634+
48635+ rcu_read_lock();
48636+ read_lock(&tasklist_lock);
48637+ task = find_task_by_vpid(pid);
48638+ if (task) {
48639+#ifdef CONFIG_GRKERNSEC_CHROOT
48640+ if (proc_is_chrooted(task))
48641+ ret = -EACCES;
48642+#endif
48643+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48644+ cred = __task_cred(task);
48645+#ifdef CONFIG_GRKERNSEC_PROC_USER
48646+ if (cred->uid != 0)
48647+ ret = -EACCES;
48648+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48649+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48650+ ret = -EACCES;
48651+#endif
48652+#endif
48653+ if (gr_status & GR_READY) {
48654+ if (!(task->acl->mode & GR_VIEW))
48655+ ret = -EACCES;
48656+ }
48657+ } else
48658+ ret = -ENOENT;
48659+
48660+ read_unlock(&tasklist_lock);
48661+ rcu_read_unlock();
48662+
48663+ return ret;
48664+}
48665+#endif
48666+
48667+/* AUXV entries are filled via a descendant of search_binary_handler
48668+ after we've already applied the subject for the target
48669+*/
48670+int gr_acl_enable_at_secure(void)
48671+{
48672+ if (unlikely(!(gr_status & GR_READY)))
48673+ return 0;
48674+
48675+ if (current->acl->mode & GR_ATSECURE)
48676+ return 1;
48677+
48678+ return 0;
48679+}
48680+
48681+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48682+{
48683+ struct task_struct *task = current;
48684+ struct dentry *dentry = file->f_path.dentry;
48685+ struct vfsmount *mnt = file->f_path.mnt;
48686+ struct acl_object_label *obj, *tmp;
48687+ struct acl_subject_label *subj;
48688+ unsigned int bufsize;
48689+ int is_not_root;
48690+ char *path;
48691+ dev_t dev = __get_dev(dentry);
48692+
48693+ if (unlikely(!(gr_status & GR_READY)))
48694+ return 1;
48695+
48696+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48697+ return 1;
48698+
48699+ /* ignore Eric Biederman */
48700+ if (IS_PRIVATE(dentry->d_inode))
48701+ return 1;
48702+
48703+ subj = task->acl;
48704+ do {
48705+ obj = lookup_acl_obj_label(ino, dev, subj);
48706+ if (obj != NULL)
48707+ return (obj->mode & GR_FIND) ? 1 : 0;
48708+ } while ((subj = subj->parent_subject));
48709+
48710+ /* this is purely an optimization since we're looking for an object
48711+ for the directory we're doing a readdir on
48712+ if it's possible for any globbed object to match the entry we're
48713+ filling into the directory, then the object we find here will be
48714+ an anchor point with attached globbed objects
48715+ */
48716+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48717+ if (obj->globbed == NULL)
48718+ return (obj->mode & GR_FIND) ? 1 : 0;
48719+
48720+ is_not_root = ((obj->filename[0] == '/') &&
48721+ (obj->filename[1] == '\0')) ? 0 : 1;
48722+ bufsize = PAGE_SIZE - namelen - is_not_root;
48723+
48724+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
48725+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48726+ return 1;
48727+
48728+ preempt_disable();
48729+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48730+ bufsize);
48731+
48732+ bufsize = strlen(path);
48733+
48734+ /* if base is "/", don't append an additional slash */
48735+ if (is_not_root)
48736+ *(path + bufsize) = '/';
48737+ memcpy(path + bufsize + is_not_root, name, namelen);
48738+ *(path + bufsize + namelen + is_not_root) = '\0';
48739+
48740+ tmp = obj->globbed;
48741+ while (tmp) {
48742+ if (!glob_match(tmp->filename, path)) {
48743+ preempt_enable();
48744+ return (tmp->mode & GR_FIND) ? 1 : 0;
48745+ }
48746+ tmp = tmp->next;
48747+ }
48748+ preempt_enable();
48749+ return (obj->mode & GR_FIND) ? 1 : 0;
48750+}
48751+
48752+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48753+EXPORT_SYMBOL(gr_acl_is_enabled);
48754+#endif
48755+EXPORT_SYMBOL(gr_learn_resource);
48756+EXPORT_SYMBOL(gr_set_kernel_label);
48757+#ifdef CONFIG_SECURITY
48758+EXPORT_SYMBOL(gr_check_user_change);
48759+EXPORT_SYMBOL(gr_check_group_change);
48760+#endif
48761+
48762diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
48763--- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48764+++ linux-3.0.4/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
48765@@ -0,0 +1,101 @@
48766+#include <linux/kernel.h>
48767+#include <linux/module.h>
48768+#include <linux/sched.h>
48769+#include <linux/gracl.h>
48770+#include <linux/grsecurity.h>
48771+#include <linux/grinternal.h>
48772+
48773+extern const char *captab_log[];
48774+extern int captab_log_entries;
48775+
48776+int
48777+gr_acl_is_capable(const int cap)
48778+{
48779+ struct task_struct *task = current;
48780+ const struct cred *cred = current_cred();
48781+ struct acl_subject_label *curracl;
48782+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48783+ kernel_cap_t cap_audit = __cap_empty_set;
48784+
48785+ if (!gr_acl_is_enabled())
48786+ return 1;
48787+
48788+ curracl = task->acl;
48789+
48790+ cap_drop = curracl->cap_lower;
48791+ cap_mask = curracl->cap_mask;
48792+ cap_audit = curracl->cap_invert_audit;
48793+
48794+ while ((curracl = curracl->parent_subject)) {
48795+ /* if the cap isn't specified in the current computed mask but is specified in the
48796+ current level subject, and is lowered in the current level subject, then add
48797+ it to the set of dropped capabilities
48798+ otherwise, add the current level subject's mask to the current computed mask
48799+ */
48800+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48801+ cap_raise(cap_mask, cap);
48802+ if (cap_raised(curracl->cap_lower, cap))
48803+ cap_raise(cap_drop, cap);
48804+ if (cap_raised(curracl->cap_invert_audit, cap))
48805+ cap_raise(cap_audit, cap);
48806+ }
48807+ }
48808+
48809+ if (!cap_raised(cap_drop, cap)) {
48810+ if (cap_raised(cap_audit, cap))
48811+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48812+ return 1;
48813+ }
48814+
48815+ curracl = task->acl;
48816+
48817+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48818+ && cap_raised(cred->cap_effective, cap)) {
48819+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48820+ task->role->roletype, cred->uid,
48821+ cred->gid, task->exec_file ?
48822+ gr_to_filename(task->exec_file->f_path.dentry,
48823+ task->exec_file->f_path.mnt) : curracl->filename,
48824+ curracl->filename, 0UL,
48825+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48826+ return 1;
48827+ }
48828+
48829+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48830+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48831+ return 0;
48832+}
48833+
48834+int
48835+gr_acl_is_capable_nolog(const int cap)
48836+{
48837+ struct acl_subject_label *curracl;
48838+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48839+
48840+ if (!gr_acl_is_enabled())
48841+ return 1;
48842+
48843+ curracl = current->acl;
48844+
48845+ cap_drop = curracl->cap_lower;
48846+ cap_mask = curracl->cap_mask;
48847+
48848+ while ((curracl = curracl->parent_subject)) {
48849+ /* if the cap isn't specified in the current computed mask but is specified in the
48850+ current level subject, and is lowered in the current level subject, then add
48851+ it to the set of dropped capabilities
48852+ otherwise, add the current level subject's mask to the current computed mask
48853+ */
48854+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48855+ cap_raise(cap_mask, cap);
48856+ if (cap_raised(curracl->cap_lower, cap))
48857+ cap_raise(cap_drop, cap);
48858+ }
48859+ }
48860+
48861+ if (!cap_raised(cap_drop, cap))
48862+ return 1;
48863+
48864+ return 0;
48865+}
48866+
48867diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
48868--- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48869+++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
48870@@ -0,0 +1,431 @@
48871+#include <linux/kernel.h>
48872+#include <linux/sched.h>
48873+#include <linux/types.h>
48874+#include <linux/fs.h>
48875+#include <linux/file.h>
48876+#include <linux/stat.h>
48877+#include <linux/grsecurity.h>
48878+#include <linux/grinternal.h>
48879+#include <linux/gracl.h>
48880+
48881+__u32
48882+gr_acl_handle_hidden_file(const struct dentry * dentry,
48883+ const struct vfsmount * mnt)
48884+{
48885+ __u32 mode;
48886+
48887+ if (unlikely(!dentry->d_inode))
48888+ return GR_FIND;
48889+
48890+ mode =
48891+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48892+
48893+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48894+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48895+ return mode;
48896+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48897+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48898+ return 0;
48899+ } else if (unlikely(!(mode & GR_FIND)))
48900+ return 0;
48901+
48902+ return GR_FIND;
48903+}
48904+
48905+__u32
48906+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48907+ const int fmode)
48908+{
48909+ __u32 reqmode = GR_FIND;
48910+ __u32 mode;
48911+
48912+ if (unlikely(!dentry->d_inode))
48913+ return reqmode;
48914+
48915+ if (unlikely(fmode & O_APPEND))
48916+ reqmode |= GR_APPEND;
48917+ else if (unlikely(fmode & FMODE_WRITE))
48918+ reqmode |= GR_WRITE;
48919+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48920+ reqmode |= GR_READ;
48921+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
48922+ reqmode &= ~GR_READ;
48923+ mode =
48924+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48925+ mnt);
48926+
48927+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48928+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48929+ reqmode & GR_READ ? " reading" : "",
48930+ reqmode & GR_WRITE ? " writing" : reqmode &
48931+ GR_APPEND ? " appending" : "");
48932+ return reqmode;
48933+ } else
48934+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48935+ {
48936+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48937+ reqmode & GR_READ ? " reading" : "",
48938+ reqmode & GR_WRITE ? " writing" : reqmode &
48939+ GR_APPEND ? " appending" : "");
48940+ return 0;
48941+ } else if (unlikely((mode & reqmode) != reqmode))
48942+ return 0;
48943+
48944+ return reqmode;
48945+}
48946+
48947+__u32
48948+gr_acl_handle_creat(const struct dentry * dentry,
48949+ const struct dentry * p_dentry,
48950+ const struct vfsmount * p_mnt, const int fmode,
48951+ const int imode)
48952+{
48953+ __u32 reqmode = GR_WRITE | GR_CREATE;
48954+ __u32 mode;
48955+
48956+ if (unlikely(fmode & O_APPEND))
48957+ reqmode |= GR_APPEND;
48958+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48959+ reqmode |= GR_READ;
48960+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48961+ reqmode |= GR_SETID;
48962+
48963+ mode =
48964+ gr_check_create(dentry, p_dentry, p_mnt,
48965+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48966+
48967+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48968+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48969+ reqmode & GR_READ ? " reading" : "",
48970+ reqmode & GR_WRITE ? " writing" : reqmode &
48971+ GR_APPEND ? " appending" : "");
48972+ return reqmode;
48973+ } else
48974+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48975+ {
48976+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48977+ reqmode & GR_READ ? " reading" : "",
48978+ reqmode & GR_WRITE ? " writing" : reqmode &
48979+ GR_APPEND ? " appending" : "");
48980+ return 0;
48981+ } else if (unlikely((mode & reqmode) != reqmode))
48982+ return 0;
48983+
48984+ return reqmode;
48985+}
48986+
48987+__u32
48988+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48989+ const int fmode)
48990+{
48991+ __u32 mode, reqmode = GR_FIND;
48992+
48993+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48994+ reqmode |= GR_EXEC;
48995+ if (fmode & S_IWOTH)
48996+ reqmode |= GR_WRITE;
48997+ if (fmode & S_IROTH)
48998+ reqmode |= GR_READ;
48999+
49000+ mode =
49001+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
49002+ mnt);
49003+
49004+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49005+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
49006+ reqmode & GR_READ ? " reading" : "",
49007+ reqmode & GR_WRITE ? " writing" : "",
49008+ reqmode & GR_EXEC ? " executing" : "");
49009+ return reqmode;
49010+ } else
49011+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49012+ {
49013+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
49014+ reqmode & GR_READ ? " reading" : "",
49015+ reqmode & GR_WRITE ? " writing" : "",
49016+ reqmode & GR_EXEC ? " executing" : "");
49017+ return 0;
49018+ } else if (unlikely((mode & reqmode) != reqmode))
49019+ return 0;
49020+
49021+ return reqmode;
49022+}
49023+
49024+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
49025+{
49026+ __u32 mode;
49027+
49028+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
49029+
49030+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49031+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
49032+ return mode;
49033+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49034+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
49035+ return 0;
49036+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
49037+ return 0;
49038+
49039+ return (reqmode);
49040+}
49041+
49042+__u32
49043+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
49044+{
49045+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
49046+}
49047+
49048+__u32
49049+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
49050+{
49051+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
49052+}
49053+
49054+__u32
49055+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
49056+{
49057+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
49058+}
49059+
49060+__u32
49061+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
49062+{
49063+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
49064+}
49065+
49066+__u32
49067+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
49068+ mode_t mode)
49069+{
49070+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
49071+ return 1;
49072+
49073+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
49074+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
49075+ GR_FCHMOD_ACL_MSG);
49076+ } else {
49077+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
49078+ }
49079+}
49080+
49081+__u32
49082+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
49083+ mode_t mode)
49084+{
49085+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
49086+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
49087+ GR_CHMOD_ACL_MSG);
49088+ } else {
49089+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
49090+ }
49091+}
49092+
49093+__u32
49094+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
49095+{
49096+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
49097+}
49098+
49099+__u32
49100+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
49101+{
49102+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
49103+}
49104+
49105+__u32
49106+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
49107+{
49108+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
49109+}
49110+
49111+__u32
49112+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
49113+{
49114+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
49115+ GR_UNIXCONNECT_ACL_MSG);
49116+}
49117+
49118+/* hardlinks require at minimum create permission,
49119+ any additional privilege required is based on the
49120+ privilege of the file being linked to
49121+*/
49122+__u32
49123+gr_acl_handle_link(const struct dentry * new_dentry,
49124+ const struct dentry * parent_dentry,
49125+ const struct vfsmount * parent_mnt,
49126+ const struct dentry * old_dentry,
49127+ const struct vfsmount * old_mnt, const char *to)
49128+{
49129+ __u32 mode;
49130+ __u32 needmode = GR_CREATE | GR_LINK;
49131+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
49132+
49133+ mode =
49134+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
49135+ old_mnt);
49136+
49137+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
49138+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49139+ return mode;
49140+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49141+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49142+ return 0;
49143+ } else if (unlikely((mode & needmode) != needmode))
49144+ return 0;
49145+
49146+ return 1;
49147+}
49148+
49149+__u32
49150+gr_acl_handle_symlink(const struct dentry * new_dentry,
49151+ const struct dentry * parent_dentry,
49152+ const struct vfsmount * parent_mnt, const char *from)
49153+{
49154+ __u32 needmode = GR_WRITE | GR_CREATE;
49155+ __u32 mode;
49156+
49157+ mode =
49158+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
49159+ GR_CREATE | GR_AUDIT_CREATE |
49160+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
49161+
49162+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
49163+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49164+ return mode;
49165+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49166+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49167+ return 0;
49168+ } else if (unlikely((mode & needmode) != needmode))
49169+ return 0;
49170+
49171+ return (GR_WRITE | GR_CREATE);
49172+}
49173+
49174+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
49175+{
49176+ __u32 mode;
49177+
49178+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49179+
49180+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49181+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
49182+ return mode;
49183+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49184+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
49185+ return 0;
49186+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
49187+ return 0;
49188+
49189+ return (reqmode);
49190+}
49191+
49192+__u32
49193+gr_acl_handle_mknod(const struct dentry * new_dentry,
49194+ const struct dentry * parent_dentry,
49195+ const struct vfsmount * parent_mnt,
49196+ const int mode)
49197+{
49198+ __u32 reqmode = GR_WRITE | GR_CREATE;
49199+ if (unlikely(mode & (S_ISUID | S_ISGID)))
49200+ reqmode |= GR_SETID;
49201+
49202+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49203+ reqmode, GR_MKNOD_ACL_MSG);
49204+}
49205+
49206+__u32
49207+gr_acl_handle_mkdir(const struct dentry *new_dentry,
49208+ const struct dentry *parent_dentry,
49209+ const struct vfsmount *parent_mnt)
49210+{
49211+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49212+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
49213+}
49214+
49215+#define RENAME_CHECK_SUCCESS(old, new) \
49216+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
49217+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
49218+
49219+int
49220+gr_acl_handle_rename(struct dentry *new_dentry,
49221+ struct dentry *parent_dentry,
49222+ const struct vfsmount *parent_mnt,
49223+ struct dentry *old_dentry,
49224+ struct inode *old_parent_inode,
49225+ struct vfsmount *old_mnt, const char *newname)
49226+{
49227+ __u32 comp1, comp2;
49228+ int error = 0;
49229+
49230+ if (unlikely(!gr_acl_is_enabled()))
49231+ return 0;
49232+
49233+ if (!new_dentry->d_inode) {
49234+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
49235+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
49236+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
49237+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
49238+ GR_DELETE | GR_AUDIT_DELETE |
49239+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49240+ GR_SUPPRESS, old_mnt);
49241+ } else {
49242+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
49243+ GR_CREATE | GR_DELETE |
49244+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
49245+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49246+ GR_SUPPRESS, parent_mnt);
49247+ comp2 =
49248+ gr_search_file(old_dentry,
49249+ GR_READ | GR_WRITE | GR_AUDIT_READ |
49250+ GR_DELETE | GR_AUDIT_DELETE |
49251+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
49252+ }
49253+
49254+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
49255+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
49256+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49257+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
49258+ && !(comp2 & GR_SUPPRESS)) {
49259+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49260+ error = -EACCES;
49261+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
49262+ error = -EACCES;
49263+
49264+ return error;
49265+}
49266+
49267+void
49268+gr_acl_handle_exit(void)
49269+{
49270+ u16 id;
49271+ char *rolename;
49272+ struct file *exec_file;
49273+
49274+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
49275+ !(current->role->roletype & GR_ROLE_PERSIST))) {
49276+ id = current->acl_role_id;
49277+ rolename = current->role->rolename;
49278+ gr_set_acls(1);
49279+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
49280+ }
49281+
49282+ write_lock(&grsec_exec_file_lock);
49283+ exec_file = current->exec_file;
49284+ current->exec_file = NULL;
49285+ write_unlock(&grsec_exec_file_lock);
49286+
49287+ if (exec_file)
49288+ fput(exec_file);
49289+}
49290+
49291+int
49292+gr_acl_handle_procpidmem(const struct task_struct *task)
49293+{
49294+ if (unlikely(!gr_acl_is_enabled()))
49295+ return 0;
49296+
49297+ if (task != current && task->acl->mode & GR_PROTPROCFD)
49298+ return -EACCES;
49299+
49300+ return 0;
49301+}
49302diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
49303--- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
49304+++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
49305@@ -0,0 +1,381 @@
49306+#include <linux/kernel.h>
49307+#include <asm/uaccess.h>
49308+#include <asm/errno.h>
49309+#include <net/sock.h>
49310+#include <linux/file.h>
49311+#include <linux/fs.h>
49312+#include <linux/net.h>
49313+#include <linux/in.h>
49314+#include <linux/skbuff.h>
49315+#include <linux/ip.h>
49316+#include <linux/udp.h>
49317+#include <linux/types.h>
49318+#include <linux/sched.h>
49319+#include <linux/netdevice.h>
49320+#include <linux/inetdevice.h>
49321+#include <linux/gracl.h>
49322+#include <linux/grsecurity.h>
49323+#include <linux/grinternal.h>
49324+
49325+#define GR_BIND 0x01
49326+#define GR_CONNECT 0x02
49327+#define GR_INVERT 0x04
49328+#define GR_BINDOVERRIDE 0x08
49329+#define GR_CONNECTOVERRIDE 0x10
49330+#define GR_SOCK_FAMILY 0x20
49331+
49332+static const char * gr_protocols[IPPROTO_MAX] = {
49333+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
49334+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
49335+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
49336+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
49337+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
49338+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
49339+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
49340+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
49341+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
49342+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
49343+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
49344+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
49345+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
49346+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
49347+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
49348+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
49349+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
49350+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
49351+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
49352+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
49353+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
49354+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
49355+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
49356+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
49357+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
49358+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
49359+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
49360+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
49361+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
49362+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
49363+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
49364+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
49365+ };
49366+
49367+static const char * gr_socktypes[SOCK_MAX] = {
49368+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
49369+ "unknown:7", "unknown:8", "unknown:9", "packet"
49370+ };
49371+
49372+static const char * gr_sockfamilies[AF_MAX+1] = {
49373+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
49374+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
49375+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
49376+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
49377+ };
49378+
49379+const char *
49380+gr_proto_to_name(unsigned char proto)
49381+{
49382+ return gr_protocols[proto];
49383+}
49384+
49385+const char *
49386+gr_socktype_to_name(unsigned char type)
49387+{
49388+ return gr_socktypes[type];
49389+}
49390+
49391+const char *
49392+gr_sockfamily_to_name(unsigned char family)
49393+{
49394+ return gr_sockfamilies[family];
49395+}
49396+
49397+int
49398+gr_search_socket(const int domain, const int type, const int protocol)
49399+{
49400+ struct acl_subject_label *curr;
49401+ const struct cred *cred = current_cred();
49402+
49403+ if (unlikely(!gr_acl_is_enabled()))
49404+ goto exit;
49405+
49406+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
49407+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
49408+ goto exit; // let the kernel handle it
49409+
49410+ curr = current->acl;
49411+
49412+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
49413+ /* the family is allowed, if this is PF_INET allow it only if
49414+ the extra sock type/protocol checks pass */
49415+ if (domain == PF_INET)
49416+ goto inet_check;
49417+ goto exit;
49418+ } else {
49419+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49420+ __u32 fakeip = 0;
49421+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49422+ current->role->roletype, cred->uid,
49423+ cred->gid, current->exec_file ?
49424+ gr_to_filename(current->exec_file->f_path.dentry,
49425+ current->exec_file->f_path.mnt) :
49426+ curr->filename, curr->filename,
49427+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
49428+ &current->signal->saved_ip);
49429+ goto exit;
49430+ }
49431+ goto exit_fail;
49432+ }
49433+
49434+inet_check:
49435+ /* the rest of this checking is for IPv4 only */
49436+ if (!curr->ips)
49437+ goto exit;
49438+
49439+ if ((curr->ip_type & (1 << type)) &&
49440+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
49441+ goto exit;
49442+
49443+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49444+ /* we don't place acls on raw sockets , and sometimes
49445+ dgram/ip sockets are opened for ioctl and not
49446+ bind/connect, so we'll fake a bind learn log */
49447+ if (type == SOCK_RAW || type == SOCK_PACKET) {
49448+ __u32 fakeip = 0;
49449+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49450+ current->role->roletype, cred->uid,
49451+ cred->gid, current->exec_file ?
49452+ gr_to_filename(current->exec_file->f_path.dentry,
49453+ current->exec_file->f_path.mnt) :
49454+ curr->filename, curr->filename,
49455+ &fakeip, 0, type,
49456+ protocol, GR_CONNECT, &current->signal->saved_ip);
49457+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
49458+ __u32 fakeip = 0;
49459+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49460+ current->role->roletype, cred->uid,
49461+ cred->gid, current->exec_file ?
49462+ gr_to_filename(current->exec_file->f_path.dentry,
49463+ current->exec_file->f_path.mnt) :
49464+ curr->filename, curr->filename,
49465+ &fakeip, 0, type,
49466+ protocol, GR_BIND, &current->signal->saved_ip);
49467+ }
49468+ /* we'll log when they use connect or bind */
49469+ goto exit;
49470+ }
49471+
49472+exit_fail:
49473+ if (domain == PF_INET)
49474+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
49475+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
49476+ else
49477+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
49478+ gr_socktype_to_name(type), protocol);
49479+
49480+ return 0;
49481+exit:
49482+ return 1;
49483+}
49484+
49485+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
49486+{
49487+ if ((ip->mode & mode) &&
49488+ (ip_port >= ip->low) &&
49489+ (ip_port <= ip->high) &&
49490+ ((ntohl(ip_addr) & our_netmask) ==
49491+ (ntohl(our_addr) & our_netmask))
49492+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
49493+ && (ip->type & (1 << type))) {
49494+ if (ip->mode & GR_INVERT)
49495+ return 2; // specifically denied
49496+ else
49497+ return 1; // allowed
49498+ }
49499+
49500+ return 0; // not specifically allowed, may continue parsing
49501+}
49502+
49503+static int
49504+gr_search_connectbind(const int full_mode, struct sock *sk,
49505+ struct sockaddr_in *addr, const int type)
49506+{
49507+ char iface[IFNAMSIZ] = {0};
49508+ struct acl_subject_label *curr;
49509+ struct acl_ip_label *ip;
49510+ struct inet_sock *isk;
49511+ struct net_device *dev;
49512+ struct in_device *idev;
49513+ unsigned long i;
49514+ int ret;
49515+ int mode = full_mode & (GR_BIND | GR_CONNECT);
49516+ __u32 ip_addr = 0;
49517+ __u32 our_addr;
49518+ __u32 our_netmask;
49519+ char *p;
49520+ __u16 ip_port = 0;
49521+ const struct cred *cred = current_cred();
49522+
49523+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49524+ return 0;
49525+
49526+ curr = current->acl;
49527+ isk = inet_sk(sk);
49528+
49529+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49530+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49531+ addr->sin_addr.s_addr = curr->inaddr_any_override;
49532+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49533+ struct sockaddr_in saddr;
49534+ int err;
49535+
49536+ saddr.sin_family = AF_INET;
49537+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
49538+ saddr.sin_port = isk->inet_sport;
49539+
49540+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49541+ if (err)
49542+ return err;
49543+
49544+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49545+ if (err)
49546+ return err;
49547+ }
49548+
49549+ if (!curr->ips)
49550+ return 0;
49551+
49552+ ip_addr = addr->sin_addr.s_addr;
49553+ ip_port = ntohs(addr->sin_port);
49554+
49555+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49556+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49557+ current->role->roletype, cred->uid,
49558+ cred->gid, current->exec_file ?
49559+ gr_to_filename(current->exec_file->f_path.dentry,
49560+ current->exec_file->f_path.mnt) :
49561+ curr->filename, curr->filename,
49562+ &ip_addr, ip_port, type,
49563+ sk->sk_protocol, mode, &current->signal->saved_ip);
49564+ return 0;
49565+ }
49566+
49567+ for (i = 0; i < curr->ip_num; i++) {
49568+ ip = *(curr->ips + i);
49569+ if (ip->iface != NULL) {
49570+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
49571+ p = strchr(iface, ':');
49572+ if (p != NULL)
49573+ *p = '\0';
49574+ dev = dev_get_by_name(sock_net(sk), iface);
49575+ if (dev == NULL)
49576+ continue;
49577+ idev = in_dev_get(dev);
49578+ if (idev == NULL) {
49579+ dev_put(dev);
49580+ continue;
49581+ }
49582+ rcu_read_lock();
49583+ for_ifa(idev) {
49584+ if (!strcmp(ip->iface, ifa->ifa_label)) {
49585+ our_addr = ifa->ifa_address;
49586+ our_netmask = 0xffffffff;
49587+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49588+ if (ret == 1) {
49589+ rcu_read_unlock();
49590+ in_dev_put(idev);
49591+ dev_put(dev);
49592+ return 0;
49593+ } else if (ret == 2) {
49594+ rcu_read_unlock();
49595+ in_dev_put(idev);
49596+ dev_put(dev);
49597+ goto denied;
49598+ }
49599+ }
49600+ } endfor_ifa(idev);
49601+ rcu_read_unlock();
49602+ in_dev_put(idev);
49603+ dev_put(dev);
49604+ } else {
49605+ our_addr = ip->addr;
49606+ our_netmask = ip->netmask;
49607+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49608+ if (ret == 1)
49609+ return 0;
49610+ else if (ret == 2)
49611+ goto denied;
49612+ }
49613+ }
49614+
49615+denied:
49616+ if (mode == GR_BIND)
49617+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49618+ else if (mode == GR_CONNECT)
49619+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49620+
49621+ return -EACCES;
49622+}
49623+
49624+int
49625+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49626+{
49627+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49628+}
49629+
49630+int
49631+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49632+{
49633+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49634+}
49635+
49636+int gr_search_listen(struct socket *sock)
49637+{
49638+ struct sock *sk = sock->sk;
49639+ struct sockaddr_in addr;
49640+
49641+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49642+ addr.sin_port = inet_sk(sk)->inet_sport;
49643+
49644+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49645+}
49646+
49647+int gr_search_accept(struct socket *sock)
49648+{
49649+ struct sock *sk = sock->sk;
49650+ struct sockaddr_in addr;
49651+
49652+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49653+ addr.sin_port = inet_sk(sk)->inet_sport;
49654+
49655+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49656+}
49657+
49658+int
49659+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49660+{
49661+ if (addr)
49662+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49663+ else {
49664+ struct sockaddr_in sin;
49665+ const struct inet_sock *inet = inet_sk(sk);
49666+
49667+ sin.sin_addr.s_addr = inet->inet_daddr;
49668+ sin.sin_port = inet->inet_dport;
49669+
49670+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49671+ }
49672+}
49673+
49674+int
49675+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49676+{
49677+ struct sockaddr_in sin;
49678+
49679+ if (unlikely(skb->len < sizeof (struct udphdr)))
49680+ return 0; // skip this packet
49681+
49682+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49683+ sin.sin_port = udp_hdr(skb)->source;
49684+
49685+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49686+}
49687diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
49688--- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49689+++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
49690@@ -0,0 +1,207 @@
49691+#include <linux/kernel.h>
49692+#include <linux/mm.h>
49693+#include <linux/sched.h>
49694+#include <linux/poll.h>
49695+#include <linux/string.h>
49696+#include <linux/file.h>
49697+#include <linux/types.h>
49698+#include <linux/vmalloc.h>
49699+#include <linux/grinternal.h>
49700+
49701+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49702+ size_t count, loff_t *ppos);
49703+extern int gr_acl_is_enabled(void);
49704+
49705+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49706+static int gr_learn_attached;
49707+
49708+/* use a 512k buffer */
49709+#define LEARN_BUFFER_SIZE (512 * 1024)
49710+
49711+static DEFINE_SPINLOCK(gr_learn_lock);
49712+static DEFINE_MUTEX(gr_learn_user_mutex);
49713+
49714+/* we need to maintain two buffers, so that the kernel context of grlearn
49715+ uses a semaphore around the userspace copying, and the other kernel contexts
49716+ use a spinlock when copying into the buffer, since they cannot sleep
49717+*/
49718+static char *learn_buffer;
49719+static char *learn_buffer_user;
49720+static int learn_buffer_len;
49721+static int learn_buffer_user_len;
49722+
49723+static ssize_t
49724+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49725+{
49726+ DECLARE_WAITQUEUE(wait, current);
49727+ ssize_t retval = 0;
49728+
49729+ add_wait_queue(&learn_wait, &wait);
49730+ set_current_state(TASK_INTERRUPTIBLE);
49731+ do {
49732+ mutex_lock(&gr_learn_user_mutex);
49733+ spin_lock(&gr_learn_lock);
49734+ if (learn_buffer_len)
49735+ break;
49736+ spin_unlock(&gr_learn_lock);
49737+ mutex_unlock(&gr_learn_user_mutex);
49738+ if (file->f_flags & O_NONBLOCK) {
49739+ retval = -EAGAIN;
49740+ goto out;
49741+ }
49742+ if (signal_pending(current)) {
49743+ retval = -ERESTARTSYS;
49744+ goto out;
49745+ }
49746+
49747+ schedule();
49748+ } while (1);
49749+
49750+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49751+ learn_buffer_user_len = learn_buffer_len;
49752+ retval = learn_buffer_len;
49753+ learn_buffer_len = 0;
49754+
49755+ spin_unlock(&gr_learn_lock);
49756+
49757+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49758+ retval = -EFAULT;
49759+
49760+ mutex_unlock(&gr_learn_user_mutex);
49761+out:
49762+ set_current_state(TASK_RUNNING);
49763+ remove_wait_queue(&learn_wait, &wait);
49764+ return retval;
49765+}
49766+
49767+static unsigned int
49768+poll_learn(struct file * file, poll_table * wait)
49769+{
49770+ poll_wait(file, &learn_wait, wait);
49771+
49772+ if (learn_buffer_len)
49773+ return (POLLIN | POLLRDNORM);
49774+
49775+ return 0;
49776+}
49777+
49778+void
49779+gr_clear_learn_entries(void)
49780+{
49781+ char *tmp;
49782+
49783+ mutex_lock(&gr_learn_user_mutex);
49784+ spin_lock(&gr_learn_lock);
49785+ tmp = learn_buffer;
49786+ learn_buffer = NULL;
49787+ spin_unlock(&gr_learn_lock);
49788+ if (tmp)
49789+ vfree(tmp);
49790+ if (learn_buffer_user != NULL) {
49791+ vfree(learn_buffer_user);
49792+ learn_buffer_user = NULL;
49793+ }
49794+ learn_buffer_len = 0;
49795+ mutex_unlock(&gr_learn_user_mutex);
49796+
49797+ return;
49798+}
49799+
49800+void
49801+gr_add_learn_entry(const char *fmt, ...)
49802+{
49803+ va_list args;
49804+ unsigned int len;
49805+
49806+ if (!gr_learn_attached)
49807+ return;
49808+
49809+ spin_lock(&gr_learn_lock);
49810+
49811+ /* leave a gap at the end so we know when it's "full" but don't have to
49812+ compute the exact length of the string we're trying to append
49813+ */
49814+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49815+ spin_unlock(&gr_learn_lock);
49816+ wake_up_interruptible(&learn_wait);
49817+ return;
49818+ }
49819+ if (learn_buffer == NULL) {
49820+ spin_unlock(&gr_learn_lock);
49821+ return;
49822+ }
49823+
49824+ va_start(args, fmt);
49825+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49826+ va_end(args);
49827+
49828+ learn_buffer_len += len + 1;
49829+
49830+ spin_unlock(&gr_learn_lock);
49831+ wake_up_interruptible(&learn_wait);
49832+
49833+ return;
49834+}
49835+
49836+static int
49837+open_learn(struct inode *inode, struct file *file)
49838+{
49839+ if (file->f_mode & FMODE_READ && gr_learn_attached)
49840+ return -EBUSY;
49841+ if (file->f_mode & FMODE_READ) {
49842+ int retval = 0;
49843+ mutex_lock(&gr_learn_user_mutex);
49844+ if (learn_buffer == NULL)
49845+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49846+ if (learn_buffer_user == NULL)
49847+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49848+ if (learn_buffer == NULL) {
49849+ retval = -ENOMEM;
49850+ goto out_error;
49851+ }
49852+ if (learn_buffer_user == NULL) {
49853+ retval = -ENOMEM;
49854+ goto out_error;
49855+ }
49856+ learn_buffer_len = 0;
49857+ learn_buffer_user_len = 0;
49858+ gr_learn_attached = 1;
49859+out_error:
49860+ mutex_unlock(&gr_learn_user_mutex);
49861+ return retval;
49862+ }
49863+ return 0;
49864+}
49865+
49866+static int
49867+close_learn(struct inode *inode, struct file *file)
49868+{
49869+ if (file->f_mode & FMODE_READ) {
49870+ char *tmp = NULL;
49871+ mutex_lock(&gr_learn_user_mutex);
49872+ spin_lock(&gr_learn_lock);
49873+ tmp = learn_buffer;
49874+ learn_buffer = NULL;
49875+ spin_unlock(&gr_learn_lock);
49876+ if (tmp)
49877+ vfree(tmp);
49878+ if (learn_buffer_user != NULL) {
49879+ vfree(learn_buffer_user);
49880+ learn_buffer_user = NULL;
49881+ }
49882+ learn_buffer_len = 0;
49883+ learn_buffer_user_len = 0;
49884+ gr_learn_attached = 0;
49885+ mutex_unlock(&gr_learn_user_mutex);
49886+ }
49887+
49888+ return 0;
49889+}
49890+
49891+const struct file_operations grsec_fops = {
49892+ .read = read_learn,
49893+ .write = write_grsec_handler,
49894+ .open = open_learn,
49895+ .release = close_learn,
49896+ .poll = poll_learn,
49897+};
49898diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
49899--- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49900+++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
49901@@ -0,0 +1,68 @@
49902+#include <linux/kernel.h>
49903+#include <linux/sched.h>
49904+#include <linux/gracl.h>
49905+#include <linux/grinternal.h>
49906+
49907+static const char *restab_log[] = {
49908+ [RLIMIT_CPU] = "RLIMIT_CPU",
49909+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49910+ [RLIMIT_DATA] = "RLIMIT_DATA",
49911+ [RLIMIT_STACK] = "RLIMIT_STACK",
49912+ [RLIMIT_CORE] = "RLIMIT_CORE",
49913+ [RLIMIT_RSS] = "RLIMIT_RSS",
49914+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
49915+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49916+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49917+ [RLIMIT_AS] = "RLIMIT_AS",
49918+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49919+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49920+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49921+ [RLIMIT_NICE] = "RLIMIT_NICE",
49922+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49923+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49924+ [GR_CRASH_RES] = "RLIMIT_CRASH"
49925+};
49926+
49927+void
49928+gr_log_resource(const struct task_struct *task,
49929+ const int res, const unsigned long wanted, const int gt)
49930+{
49931+ const struct cred *cred;
49932+ unsigned long rlim;
49933+
49934+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
49935+ return;
49936+
49937+ // not yet supported resource
49938+ if (unlikely(!restab_log[res]))
49939+ return;
49940+
49941+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49942+ rlim = task_rlimit_max(task, res);
49943+ else
49944+ rlim = task_rlimit(task, res);
49945+
49946+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49947+ return;
49948+
49949+ rcu_read_lock();
49950+ cred = __task_cred(task);
49951+
49952+ if (res == RLIMIT_NPROC &&
49953+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49954+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49955+ goto out_rcu_unlock;
49956+ else if (res == RLIMIT_MEMLOCK &&
49957+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49958+ goto out_rcu_unlock;
49959+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49960+ goto out_rcu_unlock;
49961+ rcu_read_unlock();
49962+
49963+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49964+
49965+ return;
49966+out_rcu_unlock:
49967+ rcu_read_unlock();
49968+ return;
49969+}
49970diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
49971--- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49972+++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
49973@@ -0,0 +1,299 @@
49974+#include <linux/kernel.h>
49975+#include <linux/mm.h>
49976+#include <asm/uaccess.h>
49977+#include <asm/errno.h>
49978+#include <asm/mman.h>
49979+#include <net/sock.h>
49980+#include <linux/file.h>
49981+#include <linux/fs.h>
49982+#include <linux/net.h>
49983+#include <linux/in.h>
49984+#include <linux/slab.h>
49985+#include <linux/types.h>
49986+#include <linux/sched.h>
49987+#include <linux/timer.h>
49988+#include <linux/gracl.h>
49989+#include <linux/grsecurity.h>
49990+#include <linux/grinternal.h>
49991+
49992+static struct crash_uid *uid_set;
49993+static unsigned short uid_used;
49994+static DEFINE_SPINLOCK(gr_uid_lock);
49995+extern rwlock_t gr_inode_lock;
49996+extern struct acl_subject_label *
49997+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49998+ struct acl_role_label *role);
49999+
50000+#ifdef CONFIG_BTRFS_FS
50001+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50002+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50003+#endif
50004+
50005+static inline dev_t __get_dev(const struct dentry *dentry)
50006+{
50007+#ifdef CONFIG_BTRFS_FS
50008+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50009+ return get_btrfs_dev_from_inode(dentry->d_inode);
50010+ else
50011+#endif
50012+ return dentry->d_inode->i_sb->s_dev;
50013+}
50014+
50015+int
50016+gr_init_uidset(void)
50017+{
50018+ uid_set =
50019+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
50020+ uid_used = 0;
50021+
50022+ return uid_set ? 1 : 0;
50023+}
50024+
50025+void
50026+gr_free_uidset(void)
50027+{
50028+ if (uid_set)
50029+ kfree(uid_set);
50030+
50031+ return;
50032+}
50033+
50034+int
50035+gr_find_uid(const uid_t uid)
50036+{
50037+ struct crash_uid *tmp = uid_set;
50038+ uid_t buid;
50039+ int low = 0, high = uid_used - 1, mid;
50040+
50041+ while (high >= low) {
50042+ mid = (low + high) >> 1;
50043+ buid = tmp[mid].uid;
50044+ if (buid == uid)
50045+ return mid;
50046+ if (buid > uid)
50047+ high = mid - 1;
50048+ if (buid < uid)
50049+ low = mid + 1;
50050+ }
50051+
50052+ return -1;
50053+}
50054+
50055+static __inline__ void
50056+gr_insertsort(void)
50057+{
50058+ unsigned short i, j;
50059+ struct crash_uid index;
50060+
50061+ for (i = 1; i < uid_used; i++) {
50062+ index = uid_set[i];
50063+ j = i;
50064+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
50065+ uid_set[j] = uid_set[j - 1];
50066+ j--;
50067+ }
50068+ uid_set[j] = index;
50069+ }
50070+
50071+ return;
50072+}
50073+
50074+static __inline__ void
50075+gr_insert_uid(const uid_t uid, const unsigned long expires)
50076+{
50077+ int loc;
50078+
50079+ if (uid_used == GR_UIDTABLE_MAX)
50080+ return;
50081+
50082+ loc = gr_find_uid(uid);
50083+
50084+ if (loc >= 0) {
50085+ uid_set[loc].expires = expires;
50086+ return;
50087+ }
50088+
50089+ uid_set[uid_used].uid = uid;
50090+ uid_set[uid_used].expires = expires;
50091+ uid_used++;
50092+
50093+ gr_insertsort();
50094+
50095+ return;
50096+}
50097+
50098+void
50099+gr_remove_uid(const unsigned short loc)
50100+{
50101+ unsigned short i;
50102+
50103+ for (i = loc + 1; i < uid_used; i++)
50104+ uid_set[i - 1] = uid_set[i];
50105+
50106+ uid_used--;
50107+
50108+ return;
50109+}
50110+
50111+int
50112+gr_check_crash_uid(const uid_t uid)
50113+{
50114+ int loc;
50115+ int ret = 0;
50116+
50117+ if (unlikely(!gr_acl_is_enabled()))
50118+ return 0;
50119+
50120+ spin_lock(&gr_uid_lock);
50121+ loc = gr_find_uid(uid);
50122+
50123+ if (loc < 0)
50124+ goto out_unlock;
50125+
50126+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
50127+ gr_remove_uid(loc);
50128+ else
50129+ ret = 1;
50130+
50131+out_unlock:
50132+ spin_unlock(&gr_uid_lock);
50133+ return ret;
50134+}
50135+
50136+static __inline__ int
50137+proc_is_setxid(const struct cred *cred)
50138+{
50139+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
50140+ cred->uid != cred->fsuid)
50141+ return 1;
50142+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
50143+ cred->gid != cred->fsgid)
50144+ return 1;
50145+
50146+ return 0;
50147+}
50148+
50149+extern int gr_fake_force_sig(int sig, struct task_struct *t);
50150+
50151+void
50152+gr_handle_crash(struct task_struct *task, const int sig)
50153+{
50154+ struct acl_subject_label *curr;
50155+ struct acl_subject_label *curr2;
50156+ struct task_struct *tsk, *tsk2;
50157+ const struct cred *cred;
50158+ const struct cred *cred2;
50159+
50160+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
50161+ return;
50162+
50163+ if (unlikely(!gr_acl_is_enabled()))
50164+ return;
50165+
50166+ curr = task->acl;
50167+
50168+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
50169+ return;
50170+
50171+ if (time_before_eq(curr->expires, get_seconds())) {
50172+ curr->expires = 0;
50173+ curr->crashes = 0;
50174+ }
50175+
50176+ curr->crashes++;
50177+
50178+ if (!curr->expires)
50179+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
50180+
50181+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50182+ time_after(curr->expires, get_seconds())) {
50183+ rcu_read_lock();
50184+ cred = __task_cred(task);
50185+ if (cred->uid && proc_is_setxid(cred)) {
50186+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50187+ spin_lock(&gr_uid_lock);
50188+ gr_insert_uid(cred->uid, curr->expires);
50189+ spin_unlock(&gr_uid_lock);
50190+ curr->expires = 0;
50191+ curr->crashes = 0;
50192+ read_lock(&tasklist_lock);
50193+ do_each_thread(tsk2, tsk) {
50194+ cred2 = __task_cred(tsk);
50195+ if (tsk != task && cred2->uid == cred->uid)
50196+ gr_fake_force_sig(SIGKILL, tsk);
50197+ } while_each_thread(tsk2, tsk);
50198+ read_unlock(&tasklist_lock);
50199+ } else {
50200+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50201+ read_lock(&tasklist_lock);
50202+ do_each_thread(tsk2, tsk) {
50203+ if (likely(tsk != task)) {
50204+ curr2 = tsk->acl;
50205+
50206+ if (curr2->device == curr->device &&
50207+ curr2->inode == curr->inode)
50208+ gr_fake_force_sig(SIGKILL, tsk);
50209+ }
50210+ } while_each_thread(tsk2, tsk);
50211+ read_unlock(&tasklist_lock);
50212+ }
50213+ rcu_read_unlock();
50214+ }
50215+
50216+ return;
50217+}
50218+
50219+int
50220+gr_check_crash_exec(const struct file *filp)
50221+{
50222+ struct acl_subject_label *curr;
50223+
50224+ if (unlikely(!gr_acl_is_enabled()))
50225+ return 0;
50226+
50227+ read_lock(&gr_inode_lock);
50228+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
50229+ __get_dev(filp->f_path.dentry),
50230+ current->role);
50231+ read_unlock(&gr_inode_lock);
50232+
50233+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
50234+ (!curr->crashes && !curr->expires))
50235+ return 0;
50236+
50237+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50238+ time_after(curr->expires, get_seconds()))
50239+ return 1;
50240+ else if (time_before_eq(curr->expires, get_seconds())) {
50241+ curr->crashes = 0;
50242+ curr->expires = 0;
50243+ }
50244+
50245+ return 0;
50246+}
50247+
50248+void
50249+gr_handle_alertkill(struct task_struct *task)
50250+{
50251+ struct acl_subject_label *curracl;
50252+ __u32 curr_ip;
50253+ struct task_struct *p, *p2;
50254+
50255+ if (unlikely(!gr_acl_is_enabled()))
50256+ return;
50257+
50258+ curracl = task->acl;
50259+ curr_ip = task->signal->curr_ip;
50260+
50261+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
50262+ read_lock(&tasklist_lock);
50263+ do_each_thread(p2, p) {
50264+ if (p->signal->curr_ip == curr_ip)
50265+ gr_fake_force_sig(SIGKILL, p);
50266+ } while_each_thread(p2, p);
50267+ read_unlock(&tasklist_lock);
50268+ } else if (curracl->mode & GR_KILLPROC)
50269+ gr_fake_force_sig(SIGKILL, task);
50270+
50271+ return;
50272+}
50273diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
50274--- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
50275+++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
50276@@ -0,0 +1,40 @@
50277+#include <linux/kernel.h>
50278+#include <linux/mm.h>
50279+#include <linux/sched.h>
50280+#include <linux/file.h>
50281+#include <linux/ipc.h>
50282+#include <linux/gracl.h>
50283+#include <linux/grsecurity.h>
50284+#include <linux/grinternal.h>
50285+
50286+int
50287+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50288+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50289+{
50290+ struct task_struct *task;
50291+
50292+ if (!gr_acl_is_enabled())
50293+ return 1;
50294+
50295+ rcu_read_lock();
50296+ read_lock(&tasklist_lock);
50297+
50298+ task = find_task_by_vpid(shm_cprid);
50299+
50300+ if (unlikely(!task))
50301+ task = find_task_by_vpid(shm_lapid);
50302+
50303+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
50304+ (task->pid == shm_lapid)) &&
50305+ (task->acl->mode & GR_PROTSHM) &&
50306+ (task->acl != current->acl))) {
50307+ read_unlock(&tasklist_lock);
50308+ rcu_read_unlock();
50309+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
50310+ return 0;
50311+ }
50312+ read_unlock(&tasklist_lock);
50313+ rcu_read_unlock();
50314+
50315+ return 1;
50316+}
50317diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
50318--- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
50319+++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
50320@@ -0,0 +1,19 @@
50321+#include <linux/kernel.h>
50322+#include <linux/sched.h>
50323+#include <linux/fs.h>
50324+#include <linux/file.h>
50325+#include <linux/grsecurity.h>
50326+#include <linux/grinternal.h>
50327+
50328+void
50329+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
50330+{
50331+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50332+ if ((grsec_enable_chdir && grsec_enable_group &&
50333+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
50334+ !grsec_enable_group)) {
50335+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
50336+ }
50337+#endif
50338+ return;
50339+}
50340diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
50341--- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
50342+++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
50343@@ -0,0 +1,351 @@
50344+#include <linux/kernel.h>
50345+#include <linux/module.h>
50346+#include <linux/sched.h>
50347+#include <linux/file.h>
50348+#include <linux/fs.h>
50349+#include <linux/mount.h>
50350+#include <linux/types.h>
50351+#include <linux/pid_namespace.h>
50352+#include <linux/grsecurity.h>
50353+#include <linux/grinternal.h>
50354+
50355+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
50356+{
50357+#ifdef CONFIG_GRKERNSEC
50358+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
50359+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
50360+ task->gr_is_chrooted = 1;
50361+ else
50362+ task->gr_is_chrooted = 0;
50363+
50364+ task->gr_chroot_dentry = path->dentry;
50365+#endif
50366+ return;
50367+}
50368+
50369+void gr_clear_chroot_entries(struct task_struct *task)
50370+{
50371+#ifdef CONFIG_GRKERNSEC
50372+ task->gr_is_chrooted = 0;
50373+ task->gr_chroot_dentry = NULL;
50374+#endif
50375+ return;
50376+}
50377+
50378+int
50379+gr_handle_chroot_unix(const pid_t pid)
50380+{
50381+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50382+ struct task_struct *p;
50383+
50384+ if (unlikely(!grsec_enable_chroot_unix))
50385+ return 1;
50386+
50387+ if (likely(!proc_is_chrooted(current)))
50388+ return 1;
50389+
50390+ rcu_read_lock();
50391+ read_lock(&tasklist_lock);
50392+ p = find_task_by_vpid_unrestricted(pid);
50393+ if (unlikely(p && !have_same_root(current, p))) {
50394+ read_unlock(&tasklist_lock);
50395+ rcu_read_unlock();
50396+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
50397+ return 0;
50398+ }
50399+ read_unlock(&tasklist_lock);
50400+ rcu_read_unlock();
50401+#endif
50402+ return 1;
50403+}
50404+
50405+int
50406+gr_handle_chroot_nice(void)
50407+{
50408+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50409+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
50410+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
50411+ return -EPERM;
50412+ }
50413+#endif
50414+ return 0;
50415+}
50416+
50417+int
50418+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
50419+{
50420+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50421+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
50422+ && proc_is_chrooted(current)) {
50423+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
50424+ return -EACCES;
50425+ }
50426+#endif
50427+ return 0;
50428+}
50429+
50430+int
50431+gr_handle_chroot_rawio(const struct inode *inode)
50432+{
50433+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50434+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50435+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
50436+ return 1;
50437+#endif
50438+ return 0;
50439+}
50440+
50441+int
50442+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
50443+{
50444+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50445+ struct task_struct *p;
50446+ int ret = 0;
50447+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
50448+ return ret;
50449+
50450+ read_lock(&tasklist_lock);
50451+ do_each_pid_task(pid, type, p) {
50452+ if (!have_same_root(current, p)) {
50453+ ret = 1;
50454+ goto out;
50455+ }
50456+ } while_each_pid_task(pid, type, p);
50457+out:
50458+ read_unlock(&tasklist_lock);
50459+ return ret;
50460+#endif
50461+ return 0;
50462+}
50463+
50464+int
50465+gr_pid_is_chrooted(struct task_struct *p)
50466+{
50467+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50468+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
50469+ return 0;
50470+
50471+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
50472+ !have_same_root(current, p)) {
50473+ return 1;
50474+ }
50475+#endif
50476+ return 0;
50477+}
50478+
50479+EXPORT_SYMBOL(gr_pid_is_chrooted);
50480+
50481+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
50482+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
50483+{
50484+ struct path path, currentroot;
50485+ int ret = 0;
50486+
50487+ path.dentry = (struct dentry *)u_dentry;
50488+ path.mnt = (struct vfsmount *)u_mnt;
50489+ get_fs_root(current->fs, &currentroot);
50490+ if (path_is_under(&path, &currentroot))
50491+ ret = 1;
50492+ path_put(&currentroot);
50493+
50494+ return ret;
50495+}
50496+#endif
50497+
50498+int
50499+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
50500+{
50501+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50502+ if (!grsec_enable_chroot_fchdir)
50503+ return 1;
50504+
50505+ if (!proc_is_chrooted(current))
50506+ return 1;
50507+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50508+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50509+ return 0;
50510+ }
50511+#endif
50512+ return 1;
50513+}
50514+
50515+int
50516+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50517+ const time_t shm_createtime)
50518+{
50519+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50520+ struct task_struct *p;
50521+ time_t starttime;
50522+
50523+ if (unlikely(!grsec_enable_chroot_shmat))
50524+ return 1;
50525+
50526+ if (likely(!proc_is_chrooted(current)))
50527+ return 1;
50528+
50529+ rcu_read_lock();
50530+ read_lock(&tasklist_lock);
50531+
50532+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50533+ starttime = p->start_time.tv_sec;
50534+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
50535+ if (have_same_root(current, p)) {
50536+ goto allow;
50537+ } else {
50538+ read_unlock(&tasklist_lock);
50539+ rcu_read_unlock();
50540+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50541+ return 0;
50542+ }
50543+ }
50544+ /* creator exited, pid reuse, fall through to next check */
50545+ }
50546+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50547+ if (unlikely(!have_same_root(current, p))) {
50548+ read_unlock(&tasklist_lock);
50549+ rcu_read_unlock();
50550+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50551+ return 0;
50552+ }
50553+ }
50554+
50555+allow:
50556+ read_unlock(&tasklist_lock);
50557+ rcu_read_unlock();
50558+#endif
50559+ return 1;
50560+}
50561+
50562+void
50563+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50564+{
50565+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50566+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50567+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50568+#endif
50569+ return;
50570+}
50571+
50572+int
50573+gr_handle_chroot_mknod(const struct dentry *dentry,
50574+ const struct vfsmount *mnt, const int mode)
50575+{
50576+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50577+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50578+ proc_is_chrooted(current)) {
50579+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50580+ return -EPERM;
50581+ }
50582+#endif
50583+ return 0;
50584+}
50585+
50586+int
50587+gr_handle_chroot_mount(const struct dentry *dentry,
50588+ const struct vfsmount *mnt, const char *dev_name)
50589+{
50590+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50591+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50592+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
50593+ return -EPERM;
50594+ }
50595+#endif
50596+ return 0;
50597+}
50598+
50599+int
50600+gr_handle_chroot_pivot(void)
50601+{
50602+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50603+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50604+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50605+ return -EPERM;
50606+ }
50607+#endif
50608+ return 0;
50609+}
50610+
50611+int
50612+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50613+{
50614+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50615+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50616+ !gr_is_outside_chroot(dentry, mnt)) {
50617+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50618+ return -EPERM;
50619+ }
50620+#endif
50621+ return 0;
50622+}
50623+
50624+extern const char *captab_log[];
50625+extern int captab_log_entries;
50626+
50627+int
50628+gr_chroot_is_capable(const int cap)
50629+{
50630+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50631+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
50632+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50633+ if (cap_raised(chroot_caps, cap)) {
50634+ const struct cred *creds = current_cred();
50635+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
50636+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
50637+ }
50638+ return 0;
50639+ }
50640+ }
50641+#endif
50642+ return 1;
50643+}
50644+
50645+int
50646+gr_chroot_is_capable_nolog(const int cap)
50647+{
50648+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50649+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
50650+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50651+ if (cap_raised(chroot_caps, cap)) {
50652+ return 0;
50653+ }
50654+ }
50655+#endif
50656+ return 1;
50657+}
50658+
50659+int
50660+gr_handle_chroot_sysctl(const int op)
50661+{
50662+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50663+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
50664+ proc_is_chrooted(current))
50665+ return -EACCES;
50666+#endif
50667+ return 0;
50668+}
50669+
50670+void
50671+gr_handle_chroot_chdir(struct path *path)
50672+{
50673+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50674+ if (grsec_enable_chroot_chdir)
50675+ set_fs_pwd(current->fs, path);
50676+#endif
50677+ return;
50678+}
50679+
50680+int
50681+gr_handle_chroot_chmod(const struct dentry *dentry,
50682+ const struct vfsmount *mnt, const int mode)
50683+{
50684+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50685+ /* allow chmod +s on directories, but not files */
50686+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50687+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50688+ proc_is_chrooted(current)) {
50689+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50690+ return -EPERM;
50691+ }
50692+#endif
50693+ return 0;
50694+}
50695diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
50696--- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50697+++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
50698@@ -0,0 +1,447 @@
50699+#include <linux/kernel.h>
50700+#include <linux/module.h>
50701+#include <linux/sched.h>
50702+#include <linux/file.h>
50703+#include <linux/fs.h>
50704+#include <linux/kdev_t.h>
50705+#include <linux/net.h>
50706+#include <linux/in.h>
50707+#include <linux/ip.h>
50708+#include <linux/skbuff.h>
50709+#include <linux/sysctl.h>
50710+
50711+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50712+void
50713+pax_set_initial_flags(struct linux_binprm *bprm)
50714+{
50715+ return;
50716+}
50717+#endif
50718+
50719+#ifdef CONFIG_SYSCTL
50720+__u32
50721+gr_handle_sysctl(const struct ctl_table * table, const int op)
50722+{
50723+ return 0;
50724+}
50725+#endif
50726+
50727+#ifdef CONFIG_TASKSTATS
50728+int gr_is_taskstats_denied(int pid)
50729+{
50730+ return 0;
50731+}
50732+#endif
50733+
50734+int
50735+gr_acl_is_enabled(void)
50736+{
50737+ return 0;
50738+}
50739+
50740+int
50741+gr_handle_rawio(const struct inode *inode)
50742+{
50743+ return 0;
50744+}
50745+
50746+void
50747+gr_acl_handle_psacct(struct task_struct *task, const long code)
50748+{
50749+ return;
50750+}
50751+
50752+int
50753+gr_handle_ptrace(struct task_struct *task, const long request)
50754+{
50755+ return 0;
50756+}
50757+
50758+int
50759+gr_handle_proc_ptrace(struct task_struct *task)
50760+{
50761+ return 0;
50762+}
50763+
50764+void
50765+gr_learn_resource(const struct task_struct *task,
50766+ const int res, const unsigned long wanted, const int gt)
50767+{
50768+ return;
50769+}
50770+
50771+int
50772+gr_set_acls(const int type)
50773+{
50774+ return 0;
50775+}
50776+
50777+int
50778+gr_check_hidden_task(const struct task_struct *tsk)
50779+{
50780+ return 0;
50781+}
50782+
50783+int
50784+gr_check_protected_task(const struct task_struct *task)
50785+{
50786+ return 0;
50787+}
50788+
50789+int
50790+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50791+{
50792+ return 0;
50793+}
50794+
50795+void
50796+gr_copy_label(struct task_struct *tsk)
50797+{
50798+ return;
50799+}
50800+
50801+void
50802+gr_set_pax_flags(struct task_struct *task)
50803+{
50804+ return;
50805+}
50806+
50807+int
50808+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50809+ const int unsafe_share)
50810+{
50811+ return 0;
50812+}
50813+
50814+void
50815+gr_handle_delete(const ino_t ino, const dev_t dev)
50816+{
50817+ return;
50818+}
50819+
50820+void
50821+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50822+{
50823+ return;
50824+}
50825+
50826+void
50827+gr_handle_crash(struct task_struct *task, const int sig)
50828+{
50829+ return;
50830+}
50831+
50832+int
50833+gr_check_crash_exec(const struct file *filp)
50834+{
50835+ return 0;
50836+}
50837+
50838+int
50839+gr_check_crash_uid(const uid_t uid)
50840+{
50841+ return 0;
50842+}
50843+
50844+void
50845+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50846+ struct dentry *old_dentry,
50847+ struct dentry *new_dentry,
50848+ struct vfsmount *mnt, const __u8 replace)
50849+{
50850+ return;
50851+}
50852+
50853+int
50854+gr_search_socket(const int family, const int type, const int protocol)
50855+{
50856+ return 1;
50857+}
50858+
50859+int
50860+gr_search_connectbind(const int mode, const struct socket *sock,
50861+ const struct sockaddr_in *addr)
50862+{
50863+ return 0;
50864+}
50865+
50866+int
50867+gr_is_capable(const int cap)
50868+{
50869+ return 1;
50870+}
50871+
50872+int
50873+gr_is_capable_nolog(const int cap)
50874+{
50875+ return 1;
50876+}
50877+
50878+void
50879+gr_handle_alertkill(struct task_struct *task)
50880+{
50881+ return;
50882+}
50883+
50884+__u32
50885+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50886+{
50887+ return 1;
50888+}
50889+
50890+__u32
50891+gr_acl_handle_hidden_file(const struct dentry * dentry,
50892+ const struct vfsmount * mnt)
50893+{
50894+ return 1;
50895+}
50896+
50897+__u32
50898+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50899+ const int fmode)
50900+{
50901+ return 1;
50902+}
50903+
50904+__u32
50905+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50906+{
50907+ return 1;
50908+}
50909+
50910+__u32
50911+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50912+{
50913+ return 1;
50914+}
50915+
50916+int
50917+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50918+ unsigned int *vm_flags)
50919+{
50920+ return 1;
50921+}
50922+
50923+__u32
50924+gr_acl_handle_truncate(const struct dentry * dentry,
50925+ const struct vfsmount * mnt)
50926+{
50927+ return 1;
50928+}
50929+
50930+__u32
50931+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50932+{
50933+ return 1;
50934+}
50935+
50936+__u32
50937+gr_acl_handle_access(const struct dentry * dentry,
50938+ const struct vfsmount * mnt, const int fmode)
50939+{
50940+ return 1;
50941+}
50942+
50943+__u32
50944+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50945+ mode_t mode)
50946+{
50947+ return 1;
50948+}
50949+
50950+__u32
50951+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50952+ mode_t mode)
50953+{
50954+ return 1;
50955+}
50956+
50957+__u32
50958+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50959+{
50960+ return 1;
50961+}
50962+
50963+__u32
50964+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50965+{
50966+ return 1;
50967+}
50968+
50969+void
50970+grsecurity_init(void)
50971+{
50972+ return;
50973+}
50974+
50975+__u32
50976+gr_acl_handle_mknod(const struct dentry * new_dentry,
50977+ const struct dentry * parent_dentry,
50978+ const struct vfsmount * parent_mnt,
50979+ const int mode)
50980+{
50981+ return 1;
50982+}
50983+
50984+__u32
50985+gr_acl_handle_mkdir(const struct dentry * new_dentry,
50986+ const struct dentry * parent_dentry,
50987+ const struct vfsmount * parent_mnt)
50988+{
50989+ return 1;
50990+}
50991+
50992+__u32
50993+gr_acl_handle_symlink(const struct dentry * new_dentry,
50994+ const struct dentry * parent_dentry,
50995+ const struct vfsmount * parent_mnt, const char *from)
50996+{
50997+ return 1;
50998+}
50999+
51000+__u32
51001+gr_acl_handle_link(const struct dentry * new_dentry,
51002+ const struct dentry * parent_dentry,
51003+ const struct vfsmount * parent_mnt,
51004+ const struct dentry * old_dentry,
51005+ const struct vfsmount * old_mnt, const char *to)
51006+{
51007+ return 1;
51008+}
51009+
51010+int
51011+gr_acl_handle_rename(const struct dentry *new_dentry,
51012+ const struct dentry *parent_dentry,
51013+ const struct vfsmount *parent_mnt,
51014+ const struct dentry *old_dentry,
51015+ const struct inode *old_parent_inode,
51016+ const struct vfsmount *old_mnt, const char *newname)
51017+{
51018+ return 0;
51019+}
51020+
51021+int
51022+gr_acl_handle_filldir(const struct file *file, const char *name,
51023+ const int namelen, const ino_t ino)
51024+{
51025+ return 1;
51026+}
51027+
51028+int
51029+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51030+ const time_t shm_createtime, const uid_t cuid, const int shmid)
51031+{
51032+ return 1;
51033+}
51034+
51035+int
51036+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
51037+{
51038+ return 0;
51039+}
51040+
51041+int
51042+gr_search_accept(const struct socket *sock)
51043+{
51044+ return 0;
51045+}
51046+
51047+int
51048+gr_search_listen(const struct socket *sock)
51049+{
51050+ return 0;
51051+}
51052+
51053+int
51054+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
51055+{
51056+ return 0;
51057+}
51058+
51059+__u32
51060+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
51061+{
51062+ return 1;
51063+}
51064+
51065+__u32
51066+gr_acl_handle_creat(const struct dentry * dentry,
51067+ const struct dentry * p_dentry,
51068+ const struct vfsmount * p_mnt, const int fmode,
51069+ const int imode)
51070+{
51071+ return 1;
51072+}
51073+
51074+void
51075+gr_acl_handle_exit(void)
51076+{
51077+ return;
51078+}
51079+
51080+int
51081+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51082+{
51083+ return 1;
51084+}
51085+
51086+void
51087+gr_set_role_label(const uid_t uid, const gid_t gid)
51088+{
51089+ return;
51090+}
51091+
51092+int
51093+gr_acl_handle_procpidmem(const struct task_struct *task)
51094+{
51095+ return 0;
51096+}
51097+
51098+int
51099+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
51100+{
51101+ return 0;
51102+}
51103+
51104+int
51105+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
51106+{
51107+ return 0;
51108+}
51109+
51110+void
51111+gr_set_kernel_label(struct task_struct *task)
51112+{
51113+ return;
51114+}
51115+
51116+int
51117+gr_check_user_change(int real, int effective, int fs)
51118+{
51119+ return 0;
51120+}
51121+
51122+int
51123+gr_check_group_change(int real, int effective, int fs)
51124+{
51125+ return 0;
51126+}
51127+
51128+int gr_acl_enable_at_secure(void)
51129+{
51130+ return 0;
51131+}
51132+
51133+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51134+{
51135+ return dentry->d_inode->i_sb->s_dev;
51136+}
51137+
51138+EXPORT_SYMBOL(gr_is_capable);
51139+EXPORT_SYMBOL(gr_is_capable_nolog);
51140+EXPORT_SYMBOL(gr_learn_resource);
51141+EXPORT_SYMBOL(gr_set_kernel_label);
51142+#ifdef CONFIG_SECURITY
51143+EXPORT_SYMBOL(gr_check_user_change);
51144+EXPORT_SYMBOL(gr_check_group_change);
51145+#endif
51146diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
51147--- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
51148+++ linux-3.0.4/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
51149@@ -0,0 +1,145 @@
51150+#include <linux/kernel.h>
51151+#include <linux/sched.h>
51152+#include <linux/file.h>
51153+#include <linux/binfmts.h>
51154+#include <linux/fs.h>
51155+#include <linux/types.h>
51156+#include <linux/grdefs.h>
51157+#include <linux/grsecurity.h>
51158+#include <linux/grinternal.h>
51159+#include <linux/capability.h>
51160+#include <linux/module.h>
51161+
51162+#include <asm/uaccess.h>
51163+
51164+#ifdef CONFIG_GRKERNSEC_EXECLOG
51165+static char gr_exec_arg_buf[132];
51166+static DEFINE_MUTEX(gr_exec_arg_mutex);
51167+#endif
51168+
51169+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
51170+
51171+void
51172+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
51173+{
51174+#ifdef CONFIG_GRKERNSEC_EXECLOG
51175+ char *grarg = gr_exec_arg_buf;
51176+ unsigned int i, x, execlen = 0;
51177+ char c;
51178+
51179+ if (!((grsec_enable_execlog && grsec_enable_group &&
51180+ in_group_p(grsec_audit_gid))
51181+ || (grsec_enable_execlog && !grsec_enable_group)))
51182+ return;
51183+
51184+ mutex_lock(&gr_exec_arg_mutex);
51185+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
51186+
51187+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
51188+ const char __user *p;
51189+ unsigned int len;
51190+
51191+ p = get_user_arg_ptr(argv, i);
51192+ if (IS_ERR(p))
51193+ goto log;
51194+
51195+ len = strnlen_user(p, 128 - execlen);
51196+ if (len > 128 - execlen)
51197+ len = 128 - execlen;
51198+ else if (len > 0)
51199+ len--;
51200+ if (copy_from_user(grarg + execlen, p, len))
51201+ goto log;
51202+
51203+ /* rewrite unprintable characters */
51204+ for (x = 0; x < len; x++) {
51205+ c = *(grarg + execlen + x);
51206+ if (c < 32 || c > 126)
51207+ *(grarg + execlen + x) = ' ';
51208+ }
51209+
51210+ execlen += len;
51211+ *(grarg + execlen) = ' ';
51212+ *(grarg + execlen + 1) = '\0';
51213+ execlen++;
51214+ }
51215+
51216+ log:
51217+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51218+ bprm->file->f_path.mnt, grarg);
51219+ mutex_unlock(&gr_exec_arg_mutex);
51220+#endif
51221+ return;
51222+}
51223+
51224+#ifdef CONFIG_GRKERNSEC
51225+extern int gr_acl_is_capable(const int cap);
51226+extern int gr_acl_is_capable_nolog(const int cap);
51227+extern int gr_chroot_is_capable(const int cap);
51228+extern int gr_chroot_is_capable_nolog(const int cap);
51229+#endif
51230+
51231+const char *captab_log[] = {
51232+ "CAP_CHOWN",
51233+ "CAP_DAC_OVERRIDE",
51234+ "CAP_DAC_READ_SEARCH",
51235+ "CAP_FOWNER",
51236+ "CAP_FSETID",
51237+ "CAP_KILL",
51238+ "CAP_SETGID",
51239+ "CAP_SETUID",
51240+ "CAP_SETPCAP",
51241+ "CAP_LINUX_IMMUTABLE",
51242+ "CAP_NET_BIND_SERVICE",
51243+ "CAP_NET_BROADCAST",
51244+ "CAP_NET_ADMIN",
51245+ "CAP_NET_RAW",
51246+ "CAP_IPC_LOCK",
51247+ "CAP_IPC_OWNER",
51248+ "CAP_SYS_MODULE",
51249+ "CAP_SYS_RAWIO",
51250+ "CAP_SYS_CHROOT",
51251+ "CAP_SYS_PTRACE",
51252+ "CAP_SYS_PACCT",
51253+ "CAP_SYS_ADMIN",
51254+ "CAP_SYS_BOOT",
51255+ "CAP_SYS_NICE",
51256+ "CAP_SYS_RESOURCE",
51257+ "CAP_SYS_TIME",
51258+ "CAP_SYS_TTY_CONFIG",
51259+ "CAP_MKNOD",
51260+ "CAP_LEASE",
51261+ "CAP_AUDIT_WRITE",
51262+ "CAP_AUDIT_CONTROL",
51263+ "CAP_SETFCAP",
51264+ "CAP_MAC_OVERRIDE",
51265+ "CAP_MAC_ADMIN",
51266+ "CAP_SYSLOG"
51267+};
51268+
51269+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
51270+
51271+int gr_is_capable(const int cap)
51272+{
51273+#ifdef CONFIG_GRKERNSEC
51274+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
51275+ return 1;
51276+ return 0;
51277+#else
51278+ return 1;
51279+#endif
51280+}
51281+
51282+int gr_is_capable_nolog(const int cap)
51283+{
51284+#ifdef CONFIG_GRKERNSEC
51285+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
51286+ return 1;
51287+ return 0;
51288+#else
51289+ return 1;
51290+#endif
51291+}
51292+
51293+EXPORT_SYMBOL(gr_is_capable);
51294+EXPORT_SYMBOL(gr_is_capable_nolog);
51295diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
51296--- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
51297+++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
51298@@ -0,0 +1,24 @@
51299+#include <linux/kernel.h>
51300+#include <linux/sched.h>
51301+#include <linux/fs.h>
51302+#include <linux/file.h>
51303+#include <linux/grinternal.h>
51304+
51305+int
51306+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
51307+ const struct dentry *dir, const int flag, const int acc_mode)
51308+{
51309+#ifdef CONFIG_GRKERNSEC_FIFO
51310+ const struct cred *cred = current_cred();
51311+
51312+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
51313+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
51314+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
51315+ (cred->fsuid != dentry->d_inode->i_uid)) {
51316+ if (!inode_permission(dentry->d_inode, acc_mode))
51317+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
51318+ return -EACCES;
51319+ }
51320+#endif
51321+ return 0;
51322+}
51323diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
51324--- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
51325+++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
51326@@ -0,0 +1,23 @@
51327+#include <linux/kernel.h>
51328+#include <linux/sched.h>
51329+#include <linux/grsecurity.h>
51330+#include <linux/grinternal.h>
51331+#include <linux/errno.h>
51332+
51333+void
51334+gr_log_forkfail(const int retval)
51335+{
51336+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51337+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
51338+ switch (retval) {
51339+ case -EAGAIN:
51340+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
51341+ break;
51342+ case -ENOMEM:
51343+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
51344+ break;
51345+ }
51346+ }
51347+#endif
51348+ return;
51349+}
51350diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
51351--- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
51352+++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
51353@@ -0,0 +1,269 @@
51354+#include <linux/kernel.h>
51355+#include <linux/sched.h>
51356+#include <linux/mm.h>
51357+#include <linux/gracl.h>
51358+#include <linux/slab.h>
51359+#include <linux/vmalloc.h>
51360+#include <linux/percpu.h>
51361+#include <linux/module.h>
51362+
51363+int grsec_enable_brute;
51364+int grsec_enable_link;
51365+int grsec_enable_dmesg;
51366+int grsec_enable_harden_ptrace;
51367+int grsec_enable_fifo;
51368+int grsec_enable_execlog;
51369+int grsec_enable_signal;
51370+int grsec_enable_forkfail;
51371+int grsec_enable_audit_ptrace;
51372+int grsec_enable_time;
51373+int grsec_enable_audit_textrel;
51374+int grsec_enable_group;
51375+int grsec_audit_gid;
51376+int grsec_enable_chdir;
51377+int grsec_enable_mount;
51378+int grsec_enable_rofs;
51379+int grsec_enable_chroot_findtask;
51380+int grsec_enable_chroot_mount;
51381+int grsec_enable_chroot_shmat;
51382+int grsec_enable_chroot_fchdir;
51383+int grsec_enable_chroot_double;
51384+int grsec_enable_chroot_pivot;
51385+int grsec_enable_chroot_chdir;
51386+int grsec_enable_chroot_chmod;
51387+int grsec_enable_chroot_mknod;
51388+int grsec_enable_chroot_nice;
51389+int grsec_enable_chroot_execlog;
51390+int grsec_enable_chroot_caps;
51391+int grsec_enable_chroot_sysctl;
51392+int grsec_enable_chroot_unix;
51393+int grsec_enable_tpe;
51394+int grsec_tpe_gid;
51395+int grsec_enable_blackhole;
51396+#ifdef CONFIG_IPV6_MODULE
51397+EXPORT_SYMBOL(grsec_enable_blackhole);
51398+#endif
51399+int grsec_lastack_retries;
51400+int grsec_enable_tpe_all;
51401+int grsec_enable_tpe_invert;
51402+int grsec_enable_socket_all;
51403+int grsec_socket_all_gid;
51404+int grsec_enable_socket_client;
51405+int grsec_socket_client_gid;
51406+int grsec_enable_socket_server;
51407+int grsec_socket_server_gid;
51408+int grsec_resource_logging;
51409+int grsec_disable_privio;
51410+int grsec_enable_log_rwxmaps;
51411+int grsec_lock;
51412+
51413+DEFINE_SPINLOCK(grsec_alert_lock);
51414+unsigned long grsec_alert_wtime = 0;
51415+unsigned long grsec_alert_fyet = 0;
51416+
51417+DEFINE_SPINLOCK(grsec_audit_lock);
51418+
51419+DEFINE_RWLOCK(grsec_exec_file_lock);
51420+
51421+char *gr_shared_page[4];
51422+
51423+char *gr_alert_log_fmt;
51424+char *gr_audit_log_fmt;
51425+char *gr_alert_log_buf;
51426+char *gr_audit_log_buf;
51427+
51428+extern struct gr_arg *gr_usermode;
51429+extern unsigned char *gr_system_salt;
51430+extern unsigned char *gr_system_sum;
51431+
51432+void __init
51433+grsecurity_init(void)
51434+{
51435+ int j;
51436+ /* create the per-cpu shared pages */
51437+
51438+#ifdef CONFIG_X86
51439+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
51440+#endif
51441+
51442+ for (j = 0; j < 4; j++) {
51443+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
51444+ if (gr_shared_page[j] == NULL) {
51445+ panic("Unable to allocate grsecurity shared page");
51446+ return;
51447+ }
51448+ }
51449+
51450+ /* allocate log buffers */
51451+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
51452+ if (!gr_alert_log_fmt) {
51453+ panic("Unable to allocate grsecurity alert log format buffer");
51454+ return;
51455+ }
51456+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
51457+ if (!gr_audit_log_fmt) {
51458+ panic("Unable to allocate grsecurity audit log format buffer");
51459+ return;
51460+ }
51461+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51462+ if (!gr_alert_log_buf) {
51463+ panic("Unable to allocate grsecurity alert log buffer");
51464+ return;
51465+ }
51466+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51467+ if (!gr_audit_log_buf) {
51468+ panic("Unable to allocate grsecurity audit log buffer");
51469+ return;
51470+ }
51471+
51472+ /* allocate memory for authentication structure */
51473+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
51474+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
51475+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
51476+
51477+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
51478+ panic("Unable to allocate grsecurity authentication structure");
51479+ return;
51480+ }
51481+
51482+
51483+#ifdef CONFIG_GRKERNSEC_IO
51484+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
51485+ grsec_disable_privio = 1;
51486+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51487+ grsec_disable_privio = 1;
51488+#else
51489+ grsec_disable_privio = 0;
51490+#endif
51491+#endif
51492+
51493+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51494+ /* for backward compatibility, tpe_invert always defaults to on if
51495+ enabled in the kernel
51496+ */
51497+ grsec_enable_tpe_invert = 1;
51498+#endif
51499+
51500+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51501+#ifndef CONFIG_GRKERNSEC_SYSCTL
51502+ grsec_lock = 1;
51503+#endif
51504+
51505+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51506+ grsec_enable_audit_textrel = 1;
51507+#endif
51508+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51509+ grsec_enable_log_rwxmaps = 1;
51510+#endif
51511+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51512+ grsec_enable_group = 1;
51513+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
51514+#endif
51515+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51516+ grsec_enable_chdir = 1;
51517+#endif
51518+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51519+ grsec_enable_harden_ptrace = 1;
51520+#endif
51521+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51522+ grsec_enable_mount = 1;
51523+#endif
51524+#ifdef CONFIG_GRKERNSEC_LINK
51525+ grsec_enable_link = 1;
51526+#endif
51527+#ifdef CONFIG_GRKERNSEC_BRUTE
51528+ grsec_enable_brute = 1;
51529+#endif
51530+#ifdef CONFIG_GRKERNSEC_DMESG
51531+ grsec_enable_dmesg = 1;
51532+#endif
51533+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51534+ grsec_enable_blackhole = 1;
51535+ grsec_lastack_retries = 4;
51536+#endif
51537+#ifdef CONFIG_GRKERNSEC_FIFO
51538+ grsec_enable_fifo = 1;
51539+#endif
51540+#ifdef CONFIG_GRKERNSEC_EXECLOG
51541+ grsec_enable_execlog = 1;
51542+#endif
51543+#ifdef CONFIG_GRKERNSEC_SIGNAL
51544+ grsec_enable_signal = 1;
51545+#endif
51546+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51547+ grsec_enable_forkfail = 1;
51548+#endif
51549+#ifdef CONFIG_GRKERNSEC_TIME
51550+ grsec_enable_time = 1;
51551+#endif
51552+#ifdef CONFIG_GRKERNSEC_RESLOG
51553+ grsec_resource_logging = 1;
51554+#endif
51555+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51556+ grsec_enable_chroot_findtask = 1;
51557+#endif
51558+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51559+ grsec_enable_chroot_unix = 1;
51560+#endif
51561+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51562+ grsec_enable_chroot_mount = 1;
51563+#endif
51564+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51565+ grsec_enable_chroot_fchdir = 1;
51566+#endif
51567+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51568+ grsec_enable_chroot_shmat = 1;
51569+#endif
51570+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51571+ grsec_enable_audit_ptrace = 1;
51572+#endif
51573+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51574+ grsec_enable_chroot_double = 1;
51575+#endif
51576+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51577+ grsec_enable_chroot_pivot = 1;
51578+#endif
51579+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51580+ grsec_enable_chroot_chdir = 1;
51581+#endif
51582+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51583+ grsec_enable_chroot_chmod = 1;
51584+#endif
51585+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51586+ grsec_enable_chroot_mknod = 1;
51587+#endif
51588+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51589+ grsec_enable_chroot_nice = 1;
51590+#endif
51591+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51592+ grsec_enable_chroot_execlog = 1;
51593+#endif
51594+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51595+ grsec_enable_chroot_caps = 1;
51596+#endif
51597+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51598+ grsec_enable_chroot_sysctl = 1;
51599+#endif
51600+#ifdef CONFIG_GRKERNSEC_TPE
51601+ grsec_enable_tpe = 1;
51602+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51603+#ifdef CONFIG_GRKERNSEC_TPE_ALL
51604+ grsec_enable_tpe_all = 1;
51605+#endif
51606+#endif
51607+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51608+ grsec_enable_socket_all = 1;
51609+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51610+#endif
51611+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51612+ grsec_enable_socket_client = 1;
51613+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51614+#endif
51615+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51616+ grsec_enable_socket_server = 1;
51617+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51618+#endif
51619+#endif
51620+
51621+ return;
51622+}
51623diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
51624--- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51625+++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
51626@@ -0,0 +1,43 @@
51627+#include <linux/kernel.h>
51628+#include <linux/sched.h>
51629+#include <linux/fs.h>
51630+#include <linux/file.h>
51631+#include <linux/grinternal.h>
51632+
51633+int
51634+gr_handle_follow_link(const struct inode *parent,
51635+ const struct inode *inode,
51636+ const struct dentry *dentry, const struct vfsmount *mnt)
51637+{
51638+#ifdef CONFIG_GRKERNSEC_LINK
51639+ const struct cred *cred = current_cred();
51640+
51641+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51642+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51643+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51644+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51645+ return -EACCES;
51646+ }
51647+#endif
51648+ return 0;
51649+}
51650+
51651+int
51652+gr_handle_hardlink(const struct dentry *dentry,
51653+ const struct vfsmount *mnt,
51654+ struct inode *inode, const int mode, const char *to)
51655+{
51656+#ifdef CONFIG_GRKERNSEC_LINK
51657+ const struct cred *cred = current_cred();
51658+
51659+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51660+ (!S_ISREG(mode) || (mode & S_ISUID) ||
51661+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51662+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51663+ !capable(CAP_FOWNER) && cred->uid) {
51664+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51665+ return -EPERM;
51666+ }
51667+#endif
51668+ return 0;
51669+}
51670diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
51671--- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51672+++ linux-3.0.4/grsecurity/grsec_log.c 2011-09-14 23:17:55.000000000 -0400
51673@@ -0,0 +1,313 @@
51674+#include <linux/kernel.h>
51675+#include <linux/sched.h>
51676+#include <linux/file.h>
51677+#include <linux/tty.h>
51678+#include <linux/fs.h>
51679+#include <linux/grinternal.h>
51680+
51681+#ifdef CONFIG_TREE_PREEMPT_RCU
51682+#define DISABLE_PREEMPT() preempt_disable()
51683+#define ENABLE_PREEMPT() preempt_enable()
51684+#else
51685+#define DISABLE_PREEMPT()
51686+#define ENABLE_PREEMPT()
51687+#endif
51688+
51689+#define BEGIN_LOCKS(x) \
51690+ DISABLE_PREEMPT(); \
51691+ rcu_read_lock(); \
51692+ read_lock(&tasklist_lock); \
51693+ read_lock(&grsec_exec_file_lock); \
51694+ if (x != GR_DO_AUDIT) \
51695+ spin_lock(&grsec_alert_lock); \
51696+ else \
51697+ spin_lock(&grsec_audit_lock)
51698+
51699+#define END_LOCKS(x) \
51700+ if (x != GR_DO_AUDIT) \
51701+ spin_unlock(&grsec_alert_lock); \
51702+ else \
51703+ spin_unlock(&grsec_audit_lock); \
51704+ read_unlock(&grsec_exec_file_lock); \
51705+ read_unlock(&tasklist_lock); \
51706+ rcu_read_unlock(); \
51707+ ENABLE_PREEMPT(); \
51708+ if (x == GR_DONT_AUDIT) \
51709+ gr_handle_alertkill(current)
51710+
51711+enum {
51712+ FLOODING,
51713+ NO_FLOODING
51714+};
51715+
51716+extern char *gr_alert_log_fmt;
51717+extern char *gr_audit_log_fmt;
51718+extern char *gr_alert_log_buf;
51719+extern char *gr_audit_log_buf;
51720+
51721+static int gr_log_start(int audit)
51722+{
51723+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51724+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51725+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51726+ unsigned long curr_secs = get_seconds();
51727+
51728+ if (audit == GR_DO_AUDIT)
51729+ goto set_fmt;
51730+
51731+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
51732+ grsec_alert_wtime = curr_secs;
51733+ grsec_alert_fyet = 0;
51734+ } else if (time_before(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
51735+ if (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST) {
51736+ grsec_alert_fyet++;
51737+ } else if (grsec_alert_fyet && grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51738+ grsec_alert_wtime = curr_secs;
51739+ grsec_alert_fyet++;
51740+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51741+ return FLOODING;
51742+ }
51743+ } else return FLOODING;
51744+
51745+set_fmt:
51746+ memset(buf, 0, PAGE_SIZE);
51747+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
51748+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51749+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51750+ } else if (current->signal->curr_ip) {
51751+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51752+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51753+ } else if (gr_acl_is_enabled()) {
51754+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51755+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51756+ } else {
51757+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
51758+ strcpy(buf, fmt);
51759+ }
51760+
51761+ return NO_FLOODING;
51762+}
51763+
51764+static void gr_log_middle(int audit, const char *msg, va_list ap)
51765+ __attribute__ ((format (printf, 2, 0)));
51766+
51767+static void gr_log_middle(int audit, const char *msg, va_list ap)
51768+{
51769+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51770+ unsigned int len = strlen(buf);
51771+
51772+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51773+
51774+ return;
51775+}
51776+
51777+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51778+ __attribute__ ((format (printf, 2, 3)));
51779+
51780+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51781+{
51782+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51783+ unsigned int len = strlen(buf);
51784+ va_list ap;
51785+
51786+ va_start(ap, msg);
51787+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51788+ va_end(ap);
51789+
51790+ return;
51791+}
51792+
51793+static void gr_log_end(int audit)
51794+{
51795+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51796+ unsigned int len = strlen(buf);
51797+
51798+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51799+ printk("%s\n", buf);
51800+
51801+ return;
51802+}
51803+
51804+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51805+{
51806+ int logtype;
51807+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51808+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51809+ void *voidptr = NULL;
51810+ int num1 = 0, num2 = 0;
51811+ unsigned long ulong1 = 0, ulong2 = 0;
51812+ struct dentry *dentry = NULL;
51813+ struct vfsmount *mnt = NULL;
51814+ struct file *file = NULL;
51815+ struct task_struct *task = NULL;
51816+ const struct cred *cred, *pcred;
51817+ va_list ap;
51818+
51819+ BEGIN_LOCKS(audit);
51820+ logtype = gr_log_start(audit);
51821+ if (logtype == FLOODING) {
51822+ END_LOCKS(audit);
51823+ return;
51824+ }
51825+ va_start(ap, argtypes);
51826+ switch (argtypes) {
51827+ case GR_TTYSNIFF:
51828+ task = va_arg(ap, struct task_struct *);
51829+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51830+ break;
51831+ case GR_SYSCTL_HIDDEN:
51832+ str1 = va_arg(ap, char *);
51833+ gr_log_middle_varargs(audit, msg, result, str1);
51834+ break;
51835+ case GR_RBAC:
51836+ dentry = va_arg(ap, struct dentry *);
51837+ mnt = va_arg(ap, struct vfsmount *);
51838+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51839+ break;
51840+ case GR_RBAC_STR:
51841+ dentry = va_arg(ap, struct dentry *);
51842+ mnt = va_arg(ap, struct vfsmount *);
51843+ str1 = va_arg(ap, char *);
51844+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51845+ break;
51846+ case GR_STR_RBAC:
51847+ str1 = va_arg(ap, char *);
51848+ dentry = va_arg(ap, struct dentry *);
51849+ mnt = va_arg(ap, struct vfsmount *);
51850+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51851+ break;
51852+ case GR_RBAC_MODE2:
51853+ dentry = va_arg(ap, struct dentry *);
51854+ mnt = va_arg(ap, struct vfsmount *);
51855+ str1 = va_arg(ap, char *);
51856+ str2 = va_arg(ap, char *);
51857+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51858+ break;
51859+ case GR_RBAC_MODE3:
51860+ dentry = va_arg(ap, struct dentry *);
51861+ mnt = va_arg(ap, struct vfsmount *);
51862+ str1 = va_arg(ap, char *);
51863+ str2 = va_arg(ap, char *);
51864+ str3 = va_arg(ap, char *);
51865+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51866+ break;
51867+ case GR_FILENAME:
51868+ dentry = va_arg(ap, struct dentry *);
51869+ mnt = va_arg(ap, struct vfsmount *);
51870+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51871+ break;
51872+ case GR_STR_FILENAME:
51873+ str1 = va_arg(ap, char *);
51874+ dentry = va_arg(ap, struct dentry *);
51875+ mnt = va_arg(ap, struct vfsmount *);
51876+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51877+ break;
51878+ case GR_FILENAME_STR:
51879+ dentry = va_arg(ap, struct dentry *);
51880+ mnt = va_arg(ap, struct vfsmount *);
51881+ str1 = va_arg(ap, char *);
51882+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51883+ break;
51884+ case GR_FILENAME_TWO_INT:
51885+ dentry = va_arg(ap, struct dentry *);
51886+ mnt = va_arg(ap, struct vfsmount *);
51887+ num1 = va_arg(ap, int);
51888+ num2 = va_arg(ap, int);
51889+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51890+ break;
51891+ case GR_FILENAME_TWO_INT_STR:
51892+ dentry = va_arg(ap, struct dentry *);
51893+ mnt = va_arg(ap, struct vfsmount *);
51894+ num1 = va_arg(ap, int);
51895+ num2 = va_arg(ap, int);
51896+ str1 = va_arg(ap, char *);
51897+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51898+ break;
51899+ case GR_TEXTREL:
51900+ file = va_arg(ap, struct file *);
51901+ ulong1 = va_arg(ap, unsigned long);
51902+ ulong2 = va_arg(ap, unsigned long);
51903+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51904+ break;
51905+ case GR_PTRACE:
51906+ task = va_arg(ap, struct task_struct *);
51907+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51908+ break;
51909+ case GR_RESOURCE:
51910+ task = va_arg(ap, struct task_struct *);
51911+ cred = __task_cred(task);
51912+ pcred = __task_cred(task->real_parent);
51913+ ulong1 = va_arg(ap, unsigned long);
51914+ str1 = va_arg(ap, char *);
51915+ ulong2 = va_arg(ap, unsigned long);
51916+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51917+ break;
51918+ case GR_CAP:
51919+ task = va_arg(ap, struct task_struct *);
51920+ cred = __task_cred(task);
51921+ pcred = __task_cred(task->real_parent);
51922+ str1 = va_arg(ap, char *);
51923+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51924+ break;
51925+ case GR_SIG:
51926+ str1 = va_arg(ap, char *);
51927+ voidptr = va_arg(ap, void *);
51928+ gr_log_middle_varargs(audit, msg, str1, voidptr);
51929+ break;
51930+ case GR_SIG2:
51931+ task = va_arg(ap, struct task_struct *);
51932+ cred = __task_cred(task);
51933+ pcred = __task_cred(task->real_parent);
51934+ num1 = va_arg(ap, int);
51935+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51936+ break;
51937+ case GR_CRASH1:
51938+ task = va_arg(ap, struct task_struct *);
51939+ cred = __task_cred(task);
51940+ pcred = __task_cred(task->real_parent);
51941+ ulong1 = va_arg(ap, unsigned long);
51942+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51943+ break;
51944+ case GR_CRASH2:
51945+ task = va_arg(ap, struct task_struct *);
51946+ cred = __task_cred(task);
51947+ pcred = __task_cred(task->real_parent);
51948+ ulong1 = va_arg(ap, unsigned long);
51949+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51950+ break;
51951+ case GR_RWXMAP:
51952+ file = va_arg(ap, struct file *);
51953+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51954+ break;
51955+ case GR_PSACCT:
51956+ {
51957+ unsigned int wday, cday;
51958+ __u8 whr, chr;
51959+ __u8 wmin, cmin;
51960+ __u8 wsec, csec;
51961+ char cur_tty[64] = { 0 };
51962+ char parent_tty[64] = { 0 };
51963+
51964+ task = va_arg(ap, struct task_struct *);
51965+ wday = va_arg(ap, unsigned int);
51966+ cday = va_arg(ap, unsigned int);
51967+ whr = va_arg(ap, int);
51968+ chr = va_arg(ap, int);
51969+ wmin = va_arg(ap, int);
51970+ cmin = va_arg(ap, int);
51971+ wsec = va_arg(ap, int);
51972+ csec = va_arg(ap, int);
51973+ ulong1 = va_arg(ap, unsigned long);
51974+ cred = __task_cred(task);
51975+ pcred = __task_cred(task->real_parent);
51976+
51977+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51978+ }
51979+ break;
51980+ default:
51981+ gr_log_middle(audit, msg, ap);
51982+ }
51983+ va_end(ap);
51984+ gr_log_end(audit);
51985+ END_LOCKS(audit);
51986+}
51987diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
51988--- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51989+++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
51990@@ -0,0 +1,33 @@
51991+#include <linux/kernel.h>
51992+#include <linux/sched.h>
51993+#include <linux/mm.h>
51994+#include <linux/mman.h>
51995+#include <linux/grinternal.h>
51996+
51997+void
51998+gr_handle_ioperm(void)
51999+{
52000+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
52001+ return;
52002+}
52003+
52004+void
52005+gr_handle_iopl(void)
52006+{
52007+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
52008+ return;
52009+}
52010+
52011+void
52012+gr_handle_mem_readwrite(u64 from, u64 to)
52013+{
52014+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
52015+ return;
52016+}
52017+
52018+void
52019+gr_handle_vm86(void)
52020+{
52021+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
52022+ return;
52023+}
52024diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
52025--- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
52026+++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
52027@@ -0,0 +1,62 @@
52028+#include <linux/kernel.h>
52029+#include <linux/sched.h>
52030+#include <linux/mount.h>
52031+#include <linux/grsecurity.h>
52032+#include <linux/grinternal.h>
52033+
52034+void
52035+gr_log_remount(const char *devname, const int retval)
52036+{
52037+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52038+ if (grsec_enable_mount && (retval >= 0))
52039+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
52040+#endif
52041+ return;
52042+}
52043+
52044+void
52045+gr_log_unmount(const char *devname, const int retval)
52046+{
52047+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52048+ if (grsec_enable_mount && (retval >= 0))
52049+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
52050+#endif
52051+ return;
52052+}
52053+
52054+void
52055+gr_log_mount(const char *from, const char *to, const int retval)
52056+{
52057+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52058+ if (grsec_enable_mount && (retval >= 0))
52059+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
52060+#endif
52061+ return;
52062+}
52063+
52064+int
52065+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
52066+{
52067+#ifdef CONFIG_GRKERNSEC_ROFS
52068+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
52069+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
52070+ return -EPERM;
52071+ } else
52072+ return 0;
52073+#endif
52074+ return 0;
52075+}
52076+
52077+int
52078+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
52079+{
52080+#ifdef CONFIG_GRKERNSEC_ROFS
52081+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
52082+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
52083+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
52084+ return -EPERM;
52085+ } else
52086+ return 0;
52087+#endif
52088+ return 0;
52089+}
52090diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
52091--- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
52092+++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
52093@@ -0,0 +1,36 @@
52094+#include <linux/kernel.h>
52095+#include <linux/sched.h>
52096+#include <linux/mm.h>
52097+#include <linux/file.h>
52098+#include <linux/grinternal.h>
52099+#include <linux/grsecurity.h>
52100+
52101+void
52102+gr_log_textrel(struct vm_area_struct * vma)
52103+{
52104+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52105+ if (grsec_enable_audit_textrel)
52106+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
52107+#endif
52108+ return;
52109+}
52110+
52111+void
52112+gr_log_rwxmmap(struct file *file)
52113+{
52114+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52115+ if (grsec_enable_log_rwxmaps)
52116+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
52117+#endif
52118+ return;
52119+}
52120+
52121+void
52122+gr_log_rwxmprotect(struct file *file)
52123+{
52124+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52125+ if (grsec_enable_log_rwxmaps)
52126+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
52127+#endif
52128+ return;
52129+}
52130diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
52131--- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
52132+++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
52133@@ -0,0 +1,14 @@
52134+#include <linux/kernel.h>
52135+#include <linux/sched.h>
52136+#include <linux/grinternal.h>
52137+#include <linux/grsecurity.h>
52138+
52139+void
52140+gr_audit_ptrace(struct task_struct *task)
52141+{
52142+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52143+ if (grsec_enable_audit_ptrace)
52144+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
52145+#endif
52146+ return;
52147+}
52148diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
52149--- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
52150+++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
52151@@ -0,0 +1,206 @@
52152+#include <linux/kernel.h>
52153+#include <linux/sched.h>
52154+#include <linux/delay.h>
52155+#include <linux/grsecurity.h>
52156+#include <linux/grinternal.h>
52157+#include <linux/hardirq.h>
52158+
52159+char *signames[] = {
52160+ [SIGSEGV] = "Segmentation fault",
52161+ [SIGILL] = "Illegal instruction",
52162+ [SIGABRT] = "Abort",
52163+ [SIGBUS] = "Invalid alignment/Bus error"
52164+};
52165+
52166+void
52167+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
52168+{
52169+#ifdef CONFIG_GRKERNSEC_SIGNAL
52170+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
52171+ (sig == SIGABRT) || (sig == SIGBUS))) {
52172+ if (t->pid == current->pid) {
52173+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
52174+ } else {
52175+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
52176+ }
52177+ }
52178+#endif
52179+ return;
52180+}
52181+
52182+int
52183+gr_handle_signal(const struct task_struct *p, const int sig)
52184+{
52185+#ifdef CONFIG_GRKERNSEC
52186+ if (current->pid > 1 && gr_check_protected_task(p)) {
52187+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
52188+ return -EPERM;
52189+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
52190+ return -EPERM;
52191+ }
52192+#endif
52193+ return 0;
52194+}
52195+
52196+#ifdef CONFIG_GRKERNSEC
52197+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
52198+
52199+int gr_fake_force_sig(int sig, struct task_struct *t)
52200+{
52201+ unsigned long int flags;
52202+ int ret, blocked, ignored;
52203+ struct k_sigaction *action;
52204+
52205+ spin_lock_irqsave(&t->sighand->siglock, flags);
52206+ action = &t->sighand->action[sig-1];
52207+ ignored = action->sa.sa_handler == SIG_IGN;
52208+ blocked = sigismember(&t->blocked, sig);
52209+ if (blocked || ignored) {
52210+ action->sa.sa_handler = SIG_DFL;
52211+ if (blocked) {
52212+ sigdelset(&t->blocked, sig);
52213+ recalc_sigpending_and_wake(t);
52214+ }
52215+ }
52216+ if (action->sa.sa_handler == SIG_DFL)
52217+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
52218+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
52219+
52220+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
52221+
52222+ return ret;
52223+}
52224+#endif
52225+
52226+#ifdef CONFIG_GRKERNSEC_BRUTE
52227+#define GR_USER_BAN_TIME (15 * 60)
52228+
52229+static int __get_dumpable(unsigned long mm_flags)
52230+{
52231+ int ret;
52232+
52233+ ret = mm_flags & MMF_DUMPABLE_MASK;
52234+ return (ret >= 2) ? 2 : ret;
52235+}
52236+#endif
52237+
52238+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
52239+{
52240+#ifdef CONFIG_GRKERNSEC_BRUTE
52241+ uid_t uid = 0;
52242+
52243+ if (!grsec_enable_brute)
52244+ return;
52245+
52246+ rcu_read_lock();
52247+ read_lock(&tasklist_lock);
52248+ read_lock(&grsec_exec_file_lock);
52249+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
52250+ p->real_parent->brute = 1;
52251+ else {
52252+ const struct cred *cred = __task_cred(p), *cred2;
52253+ struct task_struct *tsk, *tsk2;
52254+
52255+ if (!__get_dumpable(mm_flags) && cred->uid) {
52256+ struct user_struct *user;
52257+
52258+ uid = cred->uid;
52259+
52260+ /* this is put upon execution past expiration */
52261+ user = find_user(uid);
52262+ if (user == NULL)
52263+ goto unlock;
52264+ user->banned = 1;
52265+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
52266+ if (user->ban_expires == ~0UL)
52267+ user->ban_expires--;
52268+
52269+ do_each_thread(tsk2, tsk) {
52270+ cred2 = __task_cred(tsk);
52271+ if (tsk != p && cred2->uid == uid)
52272+ gr_fake_force_sig(SIGKILL, tsk);
52273+ } while_each_thread(tsk2, tsk);
52274+ }
52275+ }
52276+unlock:
52277+ read_unlock(&grsec_exec_file_lock);
52278+ read_unlock(&tasklist_lock);
52279+ rcu_read_unlock();
52280+
52281+ if (uid)
52282+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
52283+
52284+#endif
52285+ return;
52286+}
52287+
52288+void gr_handle_brute_check(void)
52289+{
52290+#ifdef CONFIG_GRKERNSEC_BRUTE
52291+ if (current->brute)
52292+ msleep(30 * 1000);
52293+#endif
52294+ return;
52295+}
52296+
52297+void gr_handle_kernel_exploit(void)
52298+{
52299+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
52300+ const struct cred *cred;
52301+ struct task_struct *tsk, *tsk2;
52302+ struct user_struct *user;
52303+ uid_t uid;
52304+
52305+ if (in_irq() || in_serving_softirq() || in_nmi())
52306+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
52307+
52308+ uid = current_uid();
52309+
52310+ if (uid == 0)
52311+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
52312+ else {
52313+ /* kill all the processes of this user, hold a reference
52314+ to their creds struct, and prevent them from creating
52315+ another process until system reset
52316+ */
52317+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
52318+ /* we intentionally leak this ref */
52319+ user = get_uid(current->cred->user);
52320+ if (user) {
52321+ user->banned = 1;
52322+ user->ban_expires = ~0UL;
52323+ }
52324+
52325+ read_lock(&tasklist_lock);
52326+ do_each_thread(tsk2, tsk) {
52327+ cred = __task_cred(tsk);
52328+ if (cred->uid == uid)
52329+ gr_fake_force_sig(SIGKILL, tsk);
52330+ } while_each_thread(tsk2, tsk);
52331+ read_unlock(&tasklist_lock);
52332+ }
52333+#endif
52334+}
52335+
52336+int __gr_process_user_ban(struct user_struct *user)
52337+{
52338+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52339+ if (unlikely(user->banned)) {
52340+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
52341+ user->banned = 0;
52342+ user->ban_expires = 0;
52343+ free_uid(user);
52344+ } else
52345+ return -EPERM;
52346+ }
52347+#endif
52348+ return 0;
52349+}
52350+
52351+int gr_process_user_ban(void)
52352+{
52353+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52354+ return __gr_process_user_ban(current->cred->user);
52355+#endif
52356+ return 0;
52357+}
52358diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
52359--- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
52360+++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
52361@@ -0,0 +1,244 @@
52362+#include <linux/kernel.h>
52363+#include <linux/module.h>
52364+#include <linux/sched.h>
52365+#include <linux/file.h>
52366+#include <linux/net.h>
52367+#include <linux/in.h>
52368+#include <linux/ip.h>
52369+#include <net/sock.h>
52370+#include <net/inet_sock.h>
52371+#include <linux/grsecurity.h>
52372+#include <linux/grinternal.h>
52373+#include <linux/gracl.h>
52374+
52375+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
52376+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
52377+
52378+EXPORT_SYMBOL(gr_search_udp_recvmsg);
52379+EXPORT_SYMBOL(gr_search_udp_sendmsg);
52380+
52381+#ifdef CONFIG_UNIX_MODULE
52382+EXPORT_SYMBOL(gr_acl_handle_unix);
52383+EXPORT_SYMBOL(gr_acl_handle_mknod);
52384+EXPORT_SYMBOL(gr_handle_chroot_unix);
52385+EXPORT_SYMBOL(gr_handle_create);
52386+#endif
52387+
52388+#ifdef CONFIG_GRKERNSEC
52389+#define gr_conn_table_size 32749
52390+struct conn_table_entry {
52391+ struct conn_table_entry *next;
52392+ struct signal_struct *sig;
52393+};
52394+
52395+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
52396+DEFINE_SPINLOCK(gr_conn_table_lock);
52397+
52398+extern const char * gr_socktype_to_name(unsigned char type);
52399+extern const char * gr_proto_to_name(unsigned char proto);
52400+extern const char * gr_sockfamily_to_name(unsigned char family);
52401+
52402+static __inline__ int
52403+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
52404+{
52405+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
52406+}
52407+
52408+static __inline__ int
52409+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
52410+ __u16 sport, __u16 dport)
52411+{
52412+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
52413+ sig->gr_sport == sport && sig->gr_dport == dport))
52414+ return 1;
52415+ else
52416+ return 0;
52417+}
52418+
52419+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
52420+{
52421+ struct conn_table_entry **match;
52422+ unsigned int index;
52423+
52424+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52425+ sig->gr_sport, sig->gr_dport,
52426+ gr_conn_table_size);
52427+
52428+ newent->sig = sig;
52429+
52430+ match = &gr_conn_table[index];
52431+ newent->next = *match;
52432+ *match = newent;
52433+
52434+ return;
52435+}
52436+
52437+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
52438+{
52439+ struct conn_table_entry *match, *last = NULL;
52440+ unsigned int index;
52441+
52442+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52443+ sig->gr_sport, sig->gr_dport,
52444+ gr_conn_table_size);
52445+
52446+ match = gr_conn_table[index];
52447+ while (match && !conn_match(match->sig,
52448+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
52449+ sig->gr_dport)) {
52450+ last = match;
52451+ match = match->next;
52452+ }
52453+
52454+ if (match) {
52455+ if (last)
52456+ last->next = match->next;
52457+ else
52458+ gr_conn_table[index] = NULL;
52459+ kfree(match);
52460+ }
52461+
52462+ return;
52463+}
52464+
52465+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
52466+ __u16 sport, __u16 dport)
52467+{
52468+ struct conn_table_entry *match;
52469+ unsigned int index;
52470+
52471+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
52472+
52473+ match = gr_conn_table[index];
52474+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
52475+ match = match->next;
52476+
52477+ if (match)
52478+ return match->sig;
52479+ else
52480+ return NULL;
52481+}
52482+
52483+#endif
52484+
52485+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
52486+{
52487+#ifdef CONFIG_GRKERNSEC
52488+ struct signal_struct *sig = task->signal;
52489+ struct conn_table_entry *newent;
52490+
52491+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
52492+ if (newent == NULL)
52493+ return;
52494+ /* no bh lock needed since we are called with bh disabled */
52495+ spin_lock(&gr_conn_table_lock);
52496+ gr_del_task_from_ip_table_nolock(sig);
52497+ sig->gr_saddr = inet->inet_rcv_saddr;
52498+ sig->gr_daddr = inet->inet_daddr;
52499+ sig->gr_sport = inet->inet_sport;
52500+ sig->gr_dport = inet->inet_dport;
52501+ gr_add_to_task_ip_table_nolock(sig, newent);
52502+ spin_unlock(&gr_conn_table_lock);
52503+#endif
52504+ return;
52505+}
52506+
52507+void gr_del_task_from_ip_table(struct task_struct *task)
52508+{
52509+#ifdef CONFIG_GRKERNSEC
52510+ spin_lock_bh(&gr_conn_table_lock);
52511+ gr_del_task_from_ip_table_nolock(task->signal);
52512+ spin_unlock_bh(&gr_conn_table_lock);
52513+#endif
52514+ return;
52515+}
52516+
52517+void
52518+gr_attach_curr_ip(const struct sock *sk)
52519+{
52520+#ifdef CONFIG_GRKERNSEC
52521+ struct signal_struct *p, *set;
52522+ const struct inet_sock *inet = inet_sk(sk);
52523+
52524+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
52525+ return;
52526+
52527+ set = current->signal;
52528+
52529+ spin_lock_bh(&gr_conn_table_lock);
52530+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
52531+ inet->inet_dport, inet->inet_sport);
52532+ if (unlikely(p != NULL)) {
52533+ set->curr_ip = p->curr_ip;
52534+ set->used_accept = 1;
52535+ gr_del_task_from_ip_table_nolock(p);
52536+ spin_unlock_bh(&gr_conn_table_lock);
52537+ return;
52538+ }
52539+ spin_unlock_bh(&gr_conn_table_lock);
52540+
52541+ set->curr_ip = inet->inet_daddr;
52542+ set->used_accept = 1;
52543+#endif
52544+ return;
52545+}
52546+
52547+int
52548+gr_handle_sock_all(const int family, const int type, const int protocol)
52549+{
52550+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52551+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52552+ (family != AF_UNIX)) {
52553+ if (family == AF_INET)
52554+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52555+ else
52556+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52557+ return -EACCES;
52558+ }
52559+#endif
52560+ return 0;
52561+}
52562+
52563+int
52564+gr_handle_sock_server(const struct sockaddr *sck)
52565+{
52566+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52567+ if (grsec_enable_socket_server &&
52568+ in_group_p(grsec_socket_server_gid) &&
52569+ sck && (sck->sa_family != AF_UNIX) &&
52570+ (sck->sa_family != AF_LOCAL)) {
52571+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52572+ return -EACCES;
52573+ }
52574+#endif
52575+ return 0;
52576+}
52577+
52578+int
52579+gr_handle_sock_server_other(const struct sock *sck)
52580+{
52581+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52582+ if (grsec_enable_socket_server &&
52583+ in_group_p(grsec_socket_server_gid) &&
52584+ sck && (sck->sk_family != AF_UNIX) &&
52585+ (sck->sk_family != AF_LOCAL)) {
52586+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52587+ return -EACCES;
52588+ }
52589+#endif
52590+ return 0;
52591+}
52592+
52593+int
52594+gr_handle_sock_client(const struct sockaddr *sck)
52595+{
52596+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52597+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52598+ sck && (sck->sa_family != AF_UNIX) &&
52599+ (sck->sa_family != AF_LOCAL)) {
52600+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52601+ return -EACCES;
52602+ }
52603+#endif
52604+ return 0;
52605+}
52606diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
52607--- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52608+++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
52609@@ -0,0 +1,433 @@
52610+#include <linux/kernel.h>
52611+#include <linux/sched.h>
52612+#include <linux/sysctl.h>
52613+#include <linux/grsecurity.h>
52614+#include <linux/grinternal.h>
52615+
52616+int
52617+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52618+{
52619+#ifdef CONFIG_GRKERNSEC_SYSCTL
52620+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52621+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52622+ return -EACCES;
52623+ }
52624+#endif
52625+ return 0;
52626+}
52627+
52628+#ifdef CONFIG_GRKERNSEC_ROFS
52629+static int __maybe_unused one = 1;
52630+#endif
52631+
52632+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52633+struct ctl_table grsecurity_table[] = {
52634+#ifdef CONFIG_GRKERNSEC_SYSCTL
52635+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52636+#ifdef CONFIG_GRKERNSEC_IO
52637+ {
52638+ .procname = "disable_priv_io",
52639+ .data = &grsec_disable_privio,
52640+ .maxlen = sizeof(int),
52641+ .mode = 0600,
52642+ .proc_handler = &proc_dointvec,
52643+ },
52644+#endif
52645+#endif
52646+#ifdef CONFIG_GRKERNSEC_LINK
52647+ {
52648+ .procname = "linking_restrictions",
52649+ .data = &grsec_enable_link,
52650+ .maxlen = sizeof(int),
52651+ .mode = 0600,
52652+ .proc_handler = &proc_dointvec,
52653+ },
52654+#endif
52655+#ifdef CONFIG_GRKERNSEC_BRUTE
52656+ {
52657+ .procname = "deter_bruteforce",
52658+ .data = &grsec_enable_brute,
52659+ .maxlen = sizeof(int),
52660+ .mode = 0600,
52661+ .proc_handler = &proc_dointvec,
52662+ },
52663+#endif
52664+#ifdef CONFIG_GRKERNSEC_FIFO
52665+ {
52666+ .procname = "fifo_restrictions",
52667+ .data = &grsec_enable_fifo,
52668+ .maxlen = sizeof(int),
52669+ .mode = 0600,
52670+ .proc_handler = &proc_dointvec,
52671+ },
52672+#endif
52673+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52674+ {
52675+ .procname = "ip_blackhole",
52676+ .data = &grsec_enable_blackhole,
52677+ .maxlen = sizeof(int),
52678+ .mode = 0600,
52679+ .proc_handler = &proc_dointvec,
52680+ },
52681+ {
52682+ .procname = "lastack_retries",
52683+ .data = &grsec_lastack_retries,
52684+ .maxlen = sizeof(int),
52685+ .mode = 0600,
52686+ .proc_handler = &proc_dointvec,
52687+ },
52688+#endif
52689+#ifdef CONFIG_GRKERNSEC_EXECLOG
52690+ {
52691+ .procname = "exec_logging",
52692+ .data = &grsec_enable_execlog,
52693+ .maxlen = sizeof(int),
52694+ .mode = 0600,
52695+ .proc_handler = &proc_dointvec,
52696+ },
52697+#endif
52698+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52699+ {
52700+ .procname = "rwxmap_logging",
52701+ .data = &grsec_enable_log_rwxmaps,
52702+ .maxlen = sizeof(int),
52703+ .mode = 0600,
52704+ .proc_handler = &proc_dointvec,
52705+ },
52706+#endif
52707+#ifdef CONFIG_GRKERNSEC_SIGNAL
52708+ {
52709+ .procname = "signal_logging",
52710+ .data = &grsec_enable_signal,
52711+ .maxlen = sizeof(int),
52712+ .mode = 0600,
52713+ .proc_handler = &proc_dointvec,
52714+ },
52715+#endif
52716+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52717+ {
52718+ .procname = "forkfail_logging",
52719+ .data = &grsec_enable_forkfail,
52720+ .maxlen = sizeof(int),
52721+ .mode = 0600,
52722+ .proc_handler = &proc_dointvec,
52723+ },
52724+#endif
52725+#ifdef CONFIG_GRKERNSEC_TIME
52726+ {
52727+ .procname = "timechange_logging",
52728+ .data = &grsec_enable_time,
52729+ .maxlen = sizeof(int),
52730+ .mode = 0600,
52731+ .proc_handler = &proc_dointvec,
52732+ },
52733+#endif
52734+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52735+ {
52736+ .procname = "chroot_deny_shmat",
52737+ .data = &grsec_enable_chroot_shmat,
52738+ .maxlen = sizeof(int),
52739+ .mode = 0600,
52740+ .proc_handler = &proc_dointvec,
52741+ },
52742+#endif
52743+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52744+ {
52745+ .procname = "chroot_deny_unix",
52746+ .data = &grsec_enable_chroot_unix,
52747+ .maxlen = sizeof(int),
52748+ .mode = 0600,
52749+ .proc_handler = &proc_dointvec,
52750+ },
52751+#endif
52752+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52753+ {
52754+ .procname = "chroot_deny_mount",
52755+ .data = &grsec_enable_chroot_mount,
52756+ .maxlen = sizeof(int),
52757+ .mode = 0600,
52758+ .proc_handler = &proc_dointvec,
52759+ },
52760+#endif
52761+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52762+ {
52763+ .procname = "chroot_deny_fchdir",
52764+ .data = &grsec_enable_chroot_fchdir,
52765+ .maxlen = sizeof(int),
52766+ .mode = 0600,
52767+ .proc_handler = &proc_dointvec,
52768+ },
52769+#endif
52770+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52771+ {
52772+ .procname = "chroot_deny_chroot",
52773+ .data = &grsec_enable_chroot_double,
52774+ .maxlen = sizeof(int),
52775+ .mode = 0600,
52776+ .proc_handler = &proc_dointvec,
52777+ },
52778+#endif
52779+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52780+ {
52781+ .procname = "chroot_deny_pivot",
52782+ .data = &grsec_enable_chroot_pivot,
52783+ .maxlen = sizeof(int),
52784+ .mode = 0600,
52785+ .proc_handler = &proc_dointvec,
52786+ },
52787+#endif
52788+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52789+ {
52790+ .procname = "chroot_enforce_chdir",
52791+ .data = &grsec_enable_chroot_chdir,
52792+ .maxlen = sizeof(int),
52793+ .mode = 0600,
52794+ .proc_handler = &proc_dointvec,
52795+ },
52796+#endif
52797+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52798+ {
52799+ .procname = "chroot_deny_chmod",
52800+ .data = &grsec_enable_chroot_chmod,
52801+ .maxlen = sizeof(int),
52802+ .mode = 0600,
52803+ .proc_handler = &proc_dointvec,
52804+ },
52805+#endif
52806+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52807+ {
52808+ .procname = "chroot_deny_mknod",
52809+ .data = &grsec_enable_chroot_mknod,
52810+ .maxlen = sizeof(int),
52811+ .mode = 0600,
52812+ .proc_handler = &proc_dointvec,
52813+ },
52814+#endif
52815+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52816+ {
52817+ .procname = "chroot_restrict_nice",
52818+ .data = &grsec_enable_chroot_nice,
52819+ .maxlen = sizeof(int),
52820+ .mode = 0600,
52821+ .proc_handler = &proc_dointvec,
52822+ },
52823+#endif
52824+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52825+ {
52826+ .procname = "chroot_execlog",
52827+ .data = &grsec_enable_chroot_execlog,
52828+ .maxlen = sizeof(int),
52829+ .mode = 0600,
52830+ .proc_handler = &proc_dointvec,
52831+ },
52832+#endif
52833+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52834+ {
52835+ .procname = "chroot_caps",
52836+ .data = &grsec_enable_chroot_caps,
52837+ .maxlen = sizeof(int),
52838+ .mode = 0600,
52839+ .proc_handler = &proc_dointvec,
52840+ },
52841+#endif
52842+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52843+ {
52844+ .procname = "chroot_deny_sysctl",
52845+ .data = &grsec_enable_chroot_sysctl,
52846+ .maxlen = sizeof(int),
52847+ .mode = 0600,
52848+ .proc_handler = &proc_dointvec,
52849+ },
52850+#endif
52851+#ifdef CONFIG_GRKERNSEC_TPE
52852+ {
52853+ .procname = "tpe",
52854+ .data = &grsec_enable_tpe,
52855+ .maxlen = sizeof(int),
52856+ .mode = 0600,
52857+ .proc_handler = &proc_dointvec,
52858+ },
52859+ {
52860+ .procname = "tpe_gid",
52861+ .data = &grsec_tpe_gid,
52862+ .maxlen = sizeof(int),
52863+ .mode = 0600,
52864+ .proc_handler = &proc_dointvec,
52865+ },
52866+#endif
52867+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52868+ {
52869+ .procname = "tpe_invert",
52870+ .data = &grsec_enable_tpe_invert,
52871+ .maxlen = sizeof(int),
52872+ .mode = 0600,
52873+ .proc_handler = &proc_dointvec,
52874+ },
52875+#endif
52876+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52877+ {
52878+ .procname = "tpe_restrict_all",
52879+ .data = &grsec_enable_tpe_all,
52880+ .maxlen = sizeof(int),
52881+ .mode = 0600,
52882+ .proc_handler = &proc_dointvec,
52883+ },
52884+#endif
52885+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52886+ {
52887+ .procname = "socket_all",
52888+ .data = &grsec_enable_socket_all,
52889+ .maxlen = sizeof(int),
52890+ .mode = 0600,
52891+ .proc_handler = &proc_dointvec,
52892+ },
52893+ {
52894+ .procname = "socket_all_gid",
52895+ .data = &grsec_socket_all_gid,
52896+ .maxlen = sizeof(int),
52897+ .mode = 0600,
52898+ .proc_handler = &proc_dointvec,
52899+ },
52900+#endif
52901+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52902+ {
52903+ .procname = "socket_client",
52904+ .data = &grsec_enable_socket_client,
52905+ .maxlen = sizeof(int),
52906+ .mode = 0600,
52907+ .proc_handler = &proc_dointvec,
52908+ },
52909+ {
52910+ .procname = "socket_client_gid",
52911+ .data = &grsec_socket_client_gid,
52912+ .maxlen = sizeof(int),
52913+ .mode = 0600,
52914+ .proc_handler = &proc_dointvec,
52915+ },
52916+#endif
52917+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52918+ {
52919+ .procname = "socket_server",
52920+ .data = &grsec_enable_socket_server,
52921+ .maxlen = sizeof(int),
52922+ .mode = 0600,
52923+ .proc_handler = &proc_dointvec,
52924+ },
52925+ {
52926+ .procname = "socket_server_gid",
52927+ .data = &grsec_socket_server_gid,
52928+ .maxlen = sizeof(int),
52929+ .mode = 0600,
52930+ .proc_handler = &proc_dointvec,
52931+ },
52932+#endif
52933+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52934+ {
52935+ .procname = "audit_group",
52936+ .data = &grsec_enable_group,
52937+ .maxlen = sizeof(int),
52938+ .mode = 0600,
52939+ .proc_handler = &proc_dointvec,
52940+ },
52941+ {
52942+ .procname = "audit_gid",
52943+ .data = &grsec_audit_gid,
52944+ .maxlen = sizeof(int),
52945+ .mode = 0600,
52946+ .proc_handler = &proc_dointvec,
52947+ },
52948+#endif
52949+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52950+ {
52951+ .procname = "audit_chdir",
52952+ .data = &grsec_enable_chdir,
52953+ .maxlen = sizeof(int),
52954+ .mode = 0600,
52955+ .proc_handler = &proc_dointvec,
52956+ },
52957+#endif
52958+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52959+ {
52960+ .procname = "audit_mount",
52961+ .data = &grsec_enable_mount,
52962+ .maxlen = sizeof(int),
52963+ .mode = 0600,
52964+ .proc_handler = &proc_dointvec,
52965+ },
52966+#endif
52967+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52968+ {
52969+ .procname = "audit_textrel",
52970+ .data = &grsec_enable_audit_textrel,
52971+ .maxlen = sizeof(int),
52972+ .mode = 0600,
52973+ .proc_handler = &proc_dointvec,
52974+ },
52975+#endif
52976+#ifdef CONFIG_GRKERNSEC_DMESG
52977+ {
52978+ .procname = "dmesg",
52979+ .data = &grsec_enable_dmesg,
52980+ .maxlen = sizeof(int),
52981+ .mode = 0600,
52982+ .proc_handler = &proc_dointvec,
52983+ },
52984+#endif
52985+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52986+ {
52987+ .procname = "chroot_findtask",
52988+ .data = &grsec_enable_chroot_findtask,
52989+ .maxlen = sizeof(int),
52990+ .mode = 0600,
52991+ .proc_handler = &proc_dointvec,
52992+ },
52993+#endif
52994+#ifdef CONFIG_GRKERNSEC_RESLOG
52995+ {
52996+ .procname = "resource_logging",
52997+ .data = &grsec_resource_logging,
52998+ .maxlen = sizeof(int),
52999+ .mode = 0600,
53000+ .proc_handler = &proc_dointvec,
53001+ },
53002+#endif
53003+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53004+ {
53005+ .procname = "audit_ptrace",
53006+ .data = &grsec_enable_audit_ptrace,
53007+ .maxlen = sizeof(int),
53008+ .mode = 0600,
53009+ .proc_handler = &proc_dointvec,
53010+ },
53011+#endif
53012+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53013+ {
53014+ .procname = "harden_ptrace",
53015+ .data = &grsec_enable_harden_ptrace,
53016+ .maxlen = sizeof(int),
53017+ .mode = 0600,
53018+ .proc_handler = &proc_dointvec,
53019+ },
53020+#endif
53021+ {
53022+ .procname = "grsec_lock",
53023+ .data = &grsec_lock,
53024+ .maxlen = sizeof(int),
53025+ .mode = 0600,
53026+ .proc_handler = &proc_dointvec,
53027+ },
53028+#endif
53029+#ifdef CONFIG_GRKERNSEC_ROFS
53030+ {
53031+ .procname = "romount_protect",
53032+ .data = &grsec_enable_rofs,
53033+ .maxlen = sizeof(int),
53034+ .mode = 0600,
53035+ .proc_handler = &proc_dointvec_minmax,
53036+ .extra1 = &one,
53037+ .extra2 = &one,
53038+ },
53039+#endif
53040+ { }
53041+};
53042+#endif
53043diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
53044--- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
53045+++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
53046@@ -0,0 +1,16 @@
53047+#include <linux/kernel.h>
53048+#include <linux/sched.h>
53049+#include <linux/grinternal.h>
53050+#include <linux/module.h>
53051+
53052+void
53053+gr_log_timechange(void)
53054+{
53055+#ifdef CONFIG_GRKERNSEC_TIME
53056+ if (grsec_enable_time)
53057+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
53058+#endif
53059+ return;
53060+}
53061+
53062+EXPORT_SYMBOL(gr_log_timechange);
53063diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
53064--- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
53065+++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
53066@@ -0,0 +1,39 @@
53067+#include <linux/kernel.h>
53068+#include <linux/sched.h>
53069+#include <linux/file.h>
53070+#include <linux/fs.h>
53071+#include <linux/grinternal.h>
53072+
53073+extern int gr_acl_tpe_check(void);
53074+
53075+int
53076+gr_tpe_allow(const struct file *file)
53077+{
53078+#ifdef CONFIG_GRKERNSEC
53079+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
53080+ const struct cred *cred = current_cred();
53081+
53082+ if (cred->uid && ((grsec_enable_tpe &&
53083+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53084+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
53085+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
53086+#else
53087+ in_group_p(grsec_tpe_gid)
53088+#endif
53089+ ) || gr_acl_tpe_check()) &&
53090+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
53091+ (inode->i_mode & S_IWOTH))))) {
53092+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53093+ return 0;
53094+ }
53095+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53096+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
53097+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
53098+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
53099+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53100+ return 0;
53101+ }
53102+#endif
53103+#endif
53104+ return 1;
53105+}
53106diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
53107--- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
53108+++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
53109@@ -0,0 +1,61 @@
53110+#include <linux/err.h>
53111+#include <linux/kernel.h>
53112+#include <linux/sched.h>
53113+#include <linux/mm.h>
53114+#include <linux/scatterlist.h>
53115+#include <linux/crypto.h>
53116+#include <linux/gracl.h>
53117+
53118+
53119+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
53120+#error "crypto and sha256 must be built into the kernel"
53121+#endif
53122+
53123+int
53124+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
53125+{
53126+ char *p;
53127+ struct crypto_hash *tfm;
53128+ struct hash_desc desc;
53129+ struct scatterlist sg;
53130+ unsigned char temp_sum[GR_SHA_LEN];
53131+ volatile int retval = 0;
53132+ volatile int dummy = 0;
53133+ unsigned int i;
53134+
53135+ sg_init_table(&sg, 1);
53136+
53137+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
53138+ if (IS_ERR(tfm)) {
53139+ /* should never happen, since sha256 should be built in */
53140+ return 1;
53141+ }
53142+
53143+ desc.tfm = tfm;
53144+ desc.flags = 0;
53145+
53146+ crypto_hash_init(&desc);
53147+
53148+ p = salt;
53149+ sg_set_buf(&sg, p, GR_SALT_LEN);
53150+ crypto_hash_update(&desc, &sg, sg.length);
53151+
53152+ p = entry->pw;
53153+ sg_set_buf(&sg, p, strlen(p));
53154+
53155+ crypto_hash_update(&desc, &sg, sg.length);
53156+
53157+ crypto_hash_final(&desc, temp_sum);
53158+
53159+ memset(entry->pw, 0, GR_PW_LEN);
53160+
53161+ for (i = 0; i < GR_SHA_LEN; i++)
53162+ if (sum[i] != temp_sum[i])
53163+ retval = 1;
53164+ else
53165+ dummy = 1; // waste a cycle
53166+
53167+ crypto_free_hash(tfm);
53168+
53169+ return retval;
53170+}
53171diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
53172--- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
53173+++ linux-3.0.4/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
53174@@ -0,0 +1,1038 @@
53175+#
53176+# grecurity configuration
53177+#
53178+
53179+menu "Grsecurity"
53180+
53181+config GRKERNSEC
53182+ bool "Grsecurity"
53183+ select CRYPTO
53184+ select CRYPTO_SHA256
53185+ help
53186+ If you say Y here, you will be able to configure many features
53187+ that will enhance the security of your system. It is highly
53188+ recommended that you say Y here and read through the help
53189+ for each option so that you fully understand the features and
53190+ can evaluate their usefulness for your machine.
53191+
53192+choice
53193+ prompt "Security Level"
53194+ depends on GRKERNSEC
53195+ default GRKERNSEC_CUSTOM
53196+
53197+config GRKERNSEC_LOW
53198+ bool "Low"
53199+ select GRKERNSEC_LINK
53200+ select GRKERNSEC_FIFO
53201+ select GRKERNSEC_RANDNET
53202+ select GRKERNSEC_DMESG
53203+ select GRKERNSEC_CHROOT
53204+ select GRKERNSEC_CHROOT_CHDIR
53205+
53206+ help
53207+ If you choose this option, several of the grsecurity options will
53208+ be enabled that will give you greater protection against a number
53209+ of attacks, while assuring that none of your software will have any
53210+ conflicts with the additional security measures. If you run a lot
53211+ of unusual software, or you are having problems with the higher
53212+ security levels, you should say Y here. With this option, the
53213+ following features are enabled:
53214+
53215+ - Linking restrictions
53216+ - FIFO restrictions
53217+ - Restricted dmesg
53218+ - Enforced chdir("/") on chroot
53219+ - Runtime module disabling
53220+
53221+config GRKERNSEC_MEDIUM
53222+ bool "Medium"
53223+ select PAX
53224+ select PAX_EI_PAX
53225+ select PAX_PT_PAX_FLAGS
53226+ select PAX_HAVE_ACL_FLAGS
53227+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53228+ select GRKERNSEC_CHROOT
53229+ select GRKERNSEC_CHROOT_SYSCTL
53230+ select GRKERNSEC_LINK
53231+ select GRKERNSEC_FIFO
53232+ select GRKERNSEC_DMESG
53233+ select GRKERNSEC_RANDNET
53234+ select GRKERNSEC_FORKFAIL
53235+ select GRKERNSEC_TIME
53236+ select GRKERNSEC_SIGNAL
53237+ select GRKERNSEC_CHROOT
53238+ select GRKERNSEC_CHROOT_UNIX
53239+ select GRKERNSEC_CHROOT_MOUNT
53240+ select GRKERNSEC_CHROOT_PIVOT
53241+ select GRKERNSEC_CHROOT_DOUBLE
53242+ select GRKERNSEC_CHROOT_CHDIR
53243+ select GRKERNSEC_CHROOT_MKNOD
53244+ select GRKERNSEC_PROC
53245+ select GRKERNSEC_PROC_USERGROUP
53246+ select PAX_RANDUSTACK
53247+ select PAX_ASLR
53248+ select PAX_RANDMMAP
53249+ select PAX_REFCOUNT if (X86 || SPARC64)
53250+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53251+
53252+ help
53253+ If you say Y here, several features in addition to those included
53254+ in the low additional security level will be enabled. These
53255+ features provide even more security to your system, though in rare
53256+ cases they may be incompatible with very old or poorly written
53257+ software. If you enable this option, make sure that your auth
53258+ service (identd) is running as gid 1001. With this option,
53259+ the following features (in addition to those provided in the
53260+ low additional security level) will be enabled:
53261+
53262+ - Failed fork logging
53263+ - Time change logging
53264+ - Signal logging
53265+ - Deny mounts in chroot
53266+ - Deny double chrooting
53267+ - Deny sysctl writes in chroot
53268+ - Deny mknod in chroot
53269+ - Deny access to abstract AF_UNIX sockets out of chroot
53270+ - Deny pivot_root in chroot
53271+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
53272+ - /proc restrictions with special GID set to 10 (usually wheel)
53273+ - Address Space Layout Randomization (ASLR)
53274+ - Prevent exploitation of most refcount overflows
53275+ - Bounds checking of copying between the kernel and userland
53276+
53277+config GRKERNSEC_HIGH
53278+ bool "High"
53279+ select GRKERNSEC_LINK
53280+ select GRKERNSEC_FIFO
53281+ select GRKERNSEC_DMESG
53282+ select GRKERNSEC_FORKFAIL
53283+ select GRKERNSEC_TIME
53284+ select GRKERNSEC_SIGNAL
53285+ select GRKERNSEC_CHROOT
53286+ select GRKERNSEC_CHROOT_SHMAT
53287+ select GRKERNSEC_CHROOT_UNIX
53288+ select GRKERNSEC_CHROOT_MOUNT
53289+ select GRKERNSEC_CHROOT_FCHDIR
53290+ select GRKERNSEC_CHROOT_PIVOT
53291+ select GRKERNSEC_CHROOT_DOUBLE
53292+ select GRKERNSEC_CHROOT_CHDIR
53293+ select GRKERNSEC_CHROOT_MKNOD
53294+ select GRKERNSEC_CHROOT_CAPS
53295+ select GRKERNSEC_CHROOT_SYSCTL
53296+ select GRKERNSEC_CHROOT_FINDTASK
53297+ select GRKERNSEC_SYSFS_RESTRICT
53298+ select GRKERNSEC_PROC
53299+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53300+ select GRKERNSEC_HIDESYM
53301+ select GRKERNSEC_BRUTE
53302+ select GRKERNSEC_PROC_USERGROUP
53303+ select GRKERNSEC_KMEM
53304+ select GRKERNSEC_RESLOG
53305+ select GRKERNSEC_RANDNET
53306+ select GRKERNSEC_PROC_ADD
53307+ select GRKERNSEC_CHROOT_CHMOD
53308+ select GRKERNSEC_CHROOT_NICE
53309+ select GRKERNSEC_AUDIT_MOUNT
53310+ select GRKERNSEC_MODHARDEN if (MODULES)
53311+ select GRKERNSEC_HARDEN_PTRACE
53312+ select GRKERNSEC_VM86 if (X86_32)
53313+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
53314+ select PAX
53315+ select PAX_RANDUSTACK
53316+ select PAX_ASLR
53317+ select PAX_RANDMMAP
53318+ select PAX_NOEXEC
53319+ select PAX_MPROTECT
53320+ select PAX_EI_PAX
53321+ select PAX_PT_PAX_FLAGS
53322+ select PAX_HAVE_ACL_FLAGS
53323+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
53324+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
53325+ select PAX_RANDKSTACK if (X86_TSC && X86)
53326+ select PAX_SEGMEXEC if (X86_32)
53327+ select PAX_PAGEEXEC
53328+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
53329+ select PAX_EMUTRAMP if (PARISC)
53330+ select PAX_EMUSIGRT if (PARISC)
53331+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
53332+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
53333+ select PAX_REFCOUNT if (X86 || SPARC64)
53334+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
53335+ help
53336+ If you say Y here, many of the features of grsecurity will be
53337+ enabled, which will protect you against many kinds of attacks
53338+ against your system. The heightened security comes at a cost
53339+ of an increased chance of incompatibilities with rare software
53340+ on your machine. Since this security level enables PaX, you should
53341+ view <http://pax.grsecurity.net> and read about the PaX
53342+ project. While you are there, download chpax and run it on
53343+ binaries that cause problems with PaX. Also remember that
53344+ since the /proc restrictions are enabled, you must run your
53345+ identd as gid 1001. This security level enables the following
53346+ features in addition to those listed in the low and medium
53347+ security levels:
53348+
53349+ - Additional /proc restrictions
53350+ - Chmod restrictions in chroot
53351+ - No signals, ptrace, or viewing of processes outside of chroot
53352+ - Capability restrictions in chroot
53353+ - Deny fchdir out of chroot
53354+ - Priority restrictions in chroot
53355+ - Segmentation-based implementation of PaX
53356+ - Mprotect restrictions
53357+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
53358+ - Kernel stack randomization
53359+ - Mount/unmount/remount logging
53360+ - Kernel symbol hiding
53361+ - Prevention of memory exhaustion-based exploits
53362+ - Hardening of module auto-loading
53363+ - Ptrace restrictions
53364+ - Restricted vm86 mode
53365+ - Restricted sysfs/debugfs
53366+ - Active kernel exploit response
53367+
53368+config GRKERNSEC_CUSTOM
53369+ bool "Custom"
53370+ help
53371+ If you say Y here, you will be able to configure every grsecurity
53372+ option, which allows you to enable many more features that aren't
53373+ covered in the basic security levels. These additional features
53374+ include TPE, socket restrictions, and the sysctl system for
53375+ grsecurity. It is advised that you read through the help for
53376+ each option to determine its usefulness in your situation.
53377+
53378+endchoice
53379+
53380+menu "Address Space Protection"
53381+depends on GRKERNSEC
53382+
53383+config GRKERNSEC_KMEM
53384+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
53385+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53386+ help
53387+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53388+ be written to via mmap or otherwise to modify the running kernel.
53389+ /dev/port will also not be allowed to be opened. If you have module
53390+ support disabled, enabling this will close up four ways that are
53391+ currently used to insert malicious code into the running kernel.
53392+ Even with all these features enabled, we still highly recommend that
53393+ you use the RBAC system, as it is still possible for an attacker to
53394+ modify the running kernel through privileged I/O granted by ioperm/iopl.
53395+ If you are not using XFree86, you may be able to stop this additional
53396+ case by enabling the 'Disable privileged I/O' option. Though nothing
53397+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53398+ but only to video memory, which is the only writing we allow in this
53399+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
53400+ not be allowed to mprotect it with PROT_WRITE later.
53401+ It is highly recommended that you say Y here if you meet all the
53402+ conditions above.
53403+
53404+config GRKERNSEC_VM86
53405+ bool "Restrict VM86 mode"
53406+ depends on X86_32
53407+
53408+ help
53409+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53410+ make use of a special execution mode on 32bit x86 processors called
53411+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53412+ video cards and will still work with this option enabled. The purpose
53413+ of the option is to prevent exploitation of emulation errors in
53414+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
53415+ Nearly all users should be able to enable this option.
53416+
53417+config GRKERNSEC_IO
53418+ bool "Disable privileged I/O"
53419+ depends on X86
53420+ select RTC_CLASS
53421+ select RTC_INTF_DEV
53422+ select RTC_DRV_CMOS
53423+
53424+ help
53425+ If you say Y here, all ioperm and iopl calls will return an error.
53426+ Ioperm and iopl can be used to modify the running kernel.
53427+ Unfortunately, some programs need this access to operate properly,
53428+ the most notable of which are XFree86 and hwclock. hwclock can be
53429+ remedied by having RTC support in the kernel, so real-time
53430+ clock support is enabled if this option is enabled, to ensure
53431+ that hwclock operates correctly. XFree86 still will not
53432+ operate correctly with this option enabled, so DO NOT CHOOSE Y
53433+ IF YOU USE XFree86. If you use XFree86 and you still want to
53434+ protect your kernel against modification, use the RBAC system.
53435+
53436+config GRKERNSEC_PROC_MEMMAP
53437+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
53438+ default y if (PAX_NOEXEC || PAX_ASLR)
53439+ depends on PAX_NOEXEC || PAX_ASLR
53440+ help
53441+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53442+ give no information about the addresses of its mappings if
53443+ PaX features that rely on random addresses are enabled on the task.
53444+ If you use PaX it is greatly recommended that you say Y here as it
53445+ closes up a hole that makes the full ASLR useless for suid
53446+ binaries.
53447+
53448+config GRKERNSEC_BRUTE
53449+ bool "Deter exploit bruteforcing"
53450+ help
53451+ If you say Y here, attempts to bruteforce exploits against forking
53452+ daemons such as apache or sshd, as well as against suid/sgid binaries
53453+ will be deterred. When a child of a forking daemon is killed by PaX
53454+ or crashes due to an illegal instruction or other suspicious signal,
53455+ the parent process will be delayed 30 seconds upon every subsequent
53456+ fork until the administrator is able to assess the situation and
53457+ restart the daemon.
53458+ In the suid/sgid case, the attempt is logged, the user has all their
53459+ processes terminated, and they are prevented from executing any further
53460+ processes for 15 minutes.
53461+ It is recommended that you also enable signal logging in the auditing
53462+ section so that logs are generated when a process triggers a suspicious
53463+ signal.
53464+ If the sysctl option is enabled, a sysctl option with name
53465+ "deter_bruteforce" is created.
53466+
53467+
53468+config GRKERNSEC_MODHARDEN
53469+ bool "Harden module auto-loading"
53470+ depends on MODULES
53471+ help
53472+ If you say Y here, module auto-loading in response to use of some
53473+ feature implemented by an unloaded module will be restricted to
53474+ root users. Enabling this option helps defend against attacks
53475+ by unprivileged users who abuse the auto-loading behavior to
53476+ cause a vulnerable module to load that is then exploited.
53477+
53478+ If this option prevents a legitimate use of auto-loading for a
53479+ non-root user, the administrator can execute modprobe manually
53480+ with the exact name of the module mentioned in the alert log.
53481+ Alternatively, the administrator can add the module to the list
53482+ of modules loaded at boot by modifying init scripts.
53483+
53484+ Modification of init scripts will most likely be needed on
53485+ Ubuntu servers with encrypted home directory support enabled,
53486+ as the first non-root user logging in will cause the ecb(aes),
53487+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
53488+
53489+config GRKERNSEC_HIDESYM
53490+ bool "Hide kernel symbols"
53491+ help
53492+ If you say Y here, getting information on loaded modules, and
53493+ displaying all kernel symbols through a syscall will be restricted
53494+ to users with CAP_SYS_MODULE. For software compatibility reasons,
53495+ /proc/kallsyms will be restricted to the root user. The RBAC
53496+ system can hide that entry even from root.
53497+
53498+ This option also prevents leaking of kernel addresses through
53499+ several /proc entries.
53500+
53501+ Note that this option is only effective provided the following
53502+ conditions are met:
53503+ 1) The kernel using grsecurity is not precompiled by some distribution
53504+ 2) You have also enabled GRKERNSEC_DMESG
53505+ 3) You are using the RBAC system and hiding other files such as your
53506+ kernel image and System.map. Alternatively, enabling this option
53507+ causes the permissions on /boot, /lib/modules, and the kernel
53508+ source directory to change at compile time to prevent
53509+ reading by non-root users.
53510+ If the above conditions are met, this option will aid in providing a
53511+ useful protection against local kernel exploitation of overflows
53512+ and arbitrary read/write vulnerabilities.
53513+
53514+config GRKERNSEC_KERN_LOCKOUT
53515+ bool "Active kernel exploit response"
53516+ depends on X86 || ARM || PPC || SPARC
53517+ help
53518+ If you say Y here, when a PaX alert is triggered due to suspicious
53519+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53520+ or an OOPs occurs due to bad memory accesses, instead of just
53521+ terminating the offending process (and potentially allowing
53522+ a subsequent exploit from the same user), we will take one of two
53523+ actions:
53524+ If the user was root, we will panic the system
53525+ If the user was non-root, we will log the attempt, terminate
53526+ all processes owned by the user, then prevent them from creating
53527+ any new processes until the system is restarted
53528+ This deters repeated kernel exploitation/bruteforcing attempts
53529+ and is useful for later forensics.
53530+
53531+endmenu
53532+menu "Role Based Access Control Options"
53533+depends on GRKERNSEC
53534+
53535+config GRKERNSEC_RBAC_DEBUG
53536+ bool
53537+
53538+config GRKERNSEC_NO_RBAC
53539+ bool "Disable RBAC system"
53540+ help
53541+ If you say Y here, the /dev/grsec device will be removed from the kernel,
53542+ preventing the RBAC system from being enabled. You should only say Y
53543+ here if you have no intention of using the RBAC system, so as to prevent
53544+ an attacker with root access from misusing the RBAC system to hide files
53545+ and processes when loadable module support and /dev/[k]mem have been
53546+ locked down.
53547+
53548+config GRKERNSEC_ACL_HIDEKERN
53549+ bool "Hide kernel processes"
53550+ help
53551+ If you say Y here, all kernel threads will be hidden to all
53552+ processes but those whose subject has the "view hidden processes"
53553+ flag.
53554+
53555+config GRKERNSEC_ACL_MAXTRIES
53556+ int "Maximum tries before password lockout"
53557+ default 3
53558+ help
53559+ This option enforces the maximum number of times a user can attempt
53560+ to authorize themselves with the grsecurity RBAC system before being
53561+ denied the ability to attempt authorization again for a specified time.
53562+ The lower the number, the harder it will be to brute-force a password.
53563+
53564+config GRKERNSEC_ACL_TIMEOUT
53565+ int "Time to wait after max password tries, in seconds"
53566+ default 30
53567+ help
53568+ This option specifies the time the user must wait after attempting to
53569+ authorize to the RBAC system with the maximum number of invalid
53570+ passwords. The higher the number, the harder it will be to brute-force
53571+ a password.
53572+
53573+endmenu
53574+menu "Filesystem Protections"
53575+depends on GRKERNSEC
53576+
53577+config GRKERNSEC_PROC
53578+ bool "Proc restrictions"
53579+ help
53580+ If you say Y here, the permissions of the /proc filesystem
53581+ will be altered to enhance system security and privacy. You MUST
53582+ choose either a user only restriction or a user and group restriction.
53583+ Depending upon the option you choose, you can either restrict users to
53584+ see only the processes they themselves run, or choose a group that can
53585+ view all processes and files normally restricted to root if you choose
53586+ the "restrict to user only" option. NOTE: If you're running identd as
53587+ a non-root user, you will have to run it as the group you specify here.
53588+
53589+config GRKERNSEC_PROC_USER
53590+ bool "Restrict /proc to user only"
53591+ depends on GRKERNSEC_PROC
53592+ help
53593+ If you say Y here, non-root users will only be able to view their own
53594+ processes, and restricts them from viewing network-related information,
53595+ and viewing kernel symbol and module information.
53596+
53597+config GRKERNSEC_PROC_USERGROUP
53598+ bool "Allow special group"
53599+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53600+ help
53601+ If you say Y here, you will be able to select a group that will be
53602+ able to view all processes and network-related information. If you've
53603+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53604+ remain hidden. This option is useful if you want to run identd as
53605+ a non-root user.
53606+
53607+config GRKERNSEC_PROC_GID
53608+ int "GID for special group"
53609+ depends on GRKERNSEC_PROC_USERGROUP
53610+ default 1001
53611+
53612+config GRKERNSEC_PROC_ADD
53613+ bool "Additional restrictions"
53614+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53615+ help
53616+ If you say Y here, additional restrictions will be placed on
53617+ /proc that keep normal users from viewing device information and
53618+ slabinfo information that could be useful for exploits.
53619+
53620+config GRKERNSEC_LINK
53621+ bool "Linking restrictions"
53622+ help
53623+ If you say Y here, /tmp race exploits will be prevented, since users
53624+ will no longer be able to follow symlinks owned by other users in
53625+ world-writable +t directories (e.g. /tmp), unless the owner of the
53626+ symlink is the owner of the directory. users will also not be
53627+ able to hardlink to files they do not own. If the sysctl option is
53628+ enabled, a sysctl option with name "linking_restrictions" is created.
53629+
53630+config GRKERNSEC_FIFO
53631+ bool "FIFO restrictions"
53632+ help
53633+ If you say Y here, users will not be able to write to FIFOs they don't
53634+ own in world-writable +t directories (e.g. /tmp), unless the owner of
53635+ the FIFO is the same owner of the directory it's held in. If the sysctl
53636+ option is enabled, a sysctl option with name "fifo_restrictions" is
53637+ created.
53638+
53639+config GRKERNSEC_SYSFS_RESTRICT
53640+ bool "Sysfs/debugfs restriction"
53641+ depends on SYSFS
53642+ help
53643+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53644+ any filesystem normally mounted under it (e.g. debugfs) will only
53645+ be accessible by root. These filesystems generally provide access
53646+ to hardware and debug information that isn't appropriate for unprivileged
53647+ users of the system. Sysfs and debugfs have also become a large source
53648+ of new vulnerabilities, ranging from infoleaks to local compromise.
53649+ There has been very little oversight with an eye toward security involved
53650+ in adding new exporters of information to these filesystems, so their
53651+ use is discouraged.
53652+ This option is equivalent to a chmod 0700 of the mount paths.
53653+
53654+config GRKERNSEC_ROFS
53655+ bool "Runtime read-only mount protection"
53656+ help
53657+ If you say Y here, a sysctl option with name "romount_protect" will
53658+ be created. By setting this option to 1 at runtime, filesystems
53659+ will be protected in the following ways:
53660+ * No new writable mounts will be allowed
53661+ * Existing read-only mounts won't be able to be remounted read/write
53662+ * Write operations will be denied on all block devices
53663+ This option acts independently of grsec_lock: once it is set to 1,
53664+ it cannot be turned off. Therefore, please be mindful of the resulting
53665+ behavior if this option is enabled in an init script on a read-only
53666+ filesystem. This feature is mainly intended for secure embedded systems.
53667+
53668+config GRKERNSEC_CHROOT
53669+ bool "Chroot jail restrictions"
53670+ help
53671+ If you say Y here, you will be able to choose several options that will
53672+ make breaking out of a chrooted jail much more difficult. If you
53673+ encounter no software incompatibilities with the following options, it
53674+ is recommended that you enable each one.
53675+
53676+config GRKERNSEC_CHROOT_MOUNT
53677+ bool "Deny mounts"
53678+ depends on GRKERNSEC_CHROOT
53679+ help
53680+ If you say Y here, processes inside a chroot will not be able to
53681+ mount or remount filesystems. If the sysctl option is enabled, a
53682+ sysctl option with name "chroot_deny_mount" is created.
53683+
53684+config GRKERNSEC_CHROOT_DOUBLE
53685+ bool "Deny double-chroots"
53686+ depends on GRKERNSEC_CHROOT
53687+ help
53688+ If you say Y here, processes inside a chroot will not be able to chroot
53689+ again outside the chroot. This is a widely used method of breaking
53690+ out of a chroot jail and should not be allowed. If the sysctl
53691+ option is enabled, a sysctl option with name
53692+ "chroot_deny_chroot" is created.
53693+
53694+config GRKERNSEC_CHROOT_PIVOT
53695+ bool "Deny pivot_root in chroot"
53696+ depends on GRKERNSEC_CHROOT
53697+ help
53698+ If you say Y here, processes inside a chroot will not be able to use
53699+ a function called pivot_root() that was introduced in Linux 2.3.41. It
53700+ works similar to chroot in that it changes the root filesystem. This
53701+ function could be misused in a chrooted process to attempt to break out
53702+ of the chroot, and therefore should not be allowed. If the sysctl
53703+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
53704+ created.
53705+
53706+config GRKERNSEC_CHROOT_CHDIR
53707+ bool "Enforce chdir(\"/\") on all chroots"
53708+ depends on GRKERNSEC_CHROOT
53709+ help
53710+ If you say Y here, the current working directory of all newly-chrooted
53711+ applications will be set to the the root directory of the chroot.
53712+ The man page on chroot(2) states:
53713+ Note that this call does not change the current working
53714+ directory, so that `.' can be outside the tree rooted at
53715+ `/'. In particular, the super-user can escape from a
53716+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53717+
53718+ It is recommended that you say Y here, since it's not known to break
53719+ any software. If the sysctl option is enabled, a sysctl option with
53720+ name "chroot_enforce_chdir" is created.
53721+
53722+config GRKERNSEC_CHROOT_CHMOD
53723+ bool "Deny (f)chmod +s"
53724+ depends on GRKERNSEC_CHROOT
53725+ help
53726+ If you say Y here, processes inside a chroot will not be able to chmod
53727+ or fchmod files to make them have suid or sgid bits. This protects
53728+ against another published method of breaking a chroot. If the sysctl
53729+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
53730+ created.
53731+
53732+config GRKERNSEC_CHROOT_FCHDIR
53733+ bool "Deny fchdir out of chroot"
53734+ depends on GRKERNSEC_CHROOT
53735+ help
53736+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
53737+ to a file descriptor of the chrooting process that points to a directory
53738+ outside the filesystem will be stopped. If the sysctl option
53739+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53740+
53741+config GRKERNSEC_CHROOT_MKNOD
53742+ bool "Deny mknod"
53743+ depends on GRKERNSEC_CHROOT
53744+ help
53745+ If you say Y here, processes inside a chroot will not be allowed to
53746+ mknod. The problem with using mknod inside a chroot is that it
53747+ would allow an attacker to create a device entry that is the same
53748+ as one on the physical root of your system, which could range from
53749+ anything from the console device to a device for your harddrive (which
53750+ they could then use to wipe the drive or steal data). It is recommended
53751+ that you say Y here, unless you run into software incompatibilities.
53752+ If the sysctl option is enabled, a sysctl option with name
53753+ "chroot_deny_mknod" is created.
53754+
53755+config GRKERNSEC_CHROOT_SHMAT
53756+ bool "Deny shmat() out of chroot"
53757+ depends on GRKERNSEC_CHROOT
53758+ help
53759+ If you say Y here, processes inside a chroot will not be able to attach
53760+ to shared memory segments that were created outside of the chroot jail.
53761+ It is recommended that you say Y here. If the sysctl option is enabled,
53762+ a sysctl option with name "chroot_deny_shmat" is created.
53763+
53764+config GRKERNSEC_CHROOT_UNIX
53765+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
53766+ depends on GRKERNSEC_CHROOT
53767+ help
53768+ If you say Y here, processes inside a chroot will not be able to
53769+ connect to abstract (meaning not belonging to a filesystem) Unix
53770+ domain sockets that were bound outside of a chroot. It is recommended
53771+ that you say Y here. If the sysctl option is enabled, a sysctl option
53772+ with name "chroot_deny_unix" is created.
53773+
53774+config GRKERNSEC_CHROOT_FINDTASK
53775+ bool "Protect outside processes"
53776+ depends on GRKERNSEC_CHROOT
53777+ help
53778+ If you say Y here, processes inside a chroot will not be able to
53779+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53780+ getsid, or view any process outside of the chroot. If the sysctl
53781+ option is enabled, a sysctl option with name "chroot_findtask" is
53782+ created.
53783+
53784+config GRKERNSEC_CHROOT_NICE
53785+ bool "Restrict priority changes"
53786+ depends on GRKERNSEC_CHROOT
53787+ help
53788+ If you say Y here, processes inside a chroot will not be able to raise
53789+ the priority of processes in the chroot, or alter the priority of
53790+ processes outside the chroot. This provides more security than simply
53791+ removing CAP_SYS_NICE from the process' capability set. If the
53792+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53793+ is created.
53794+
53795+config GRKERNSEC_CHROOT_SYSCTL
53796+ bool "Deny sysctl writes"
53797+ depends on GRKERNSEC_CHROOT
53798+ help
53799+ If you say Y here, an attacker in a chroot will not be able to
53800+ write to sysctl entries, either by sysctl(2) or through a /proc
53801+ interface. It is strongly recommended that you say Y here. If the
53802+ sysctl option is enabled, a sysctl option with name
53803+ "chroot_deny_sysctl" is created.
53804+
53805+config GRKERNSEC_CHROOT_CAPS
53806+ bool "Capability restrictions"
53807+ depends on GRKERNSEC_CHROOT
53808+ help
53809+ If you say Y here, the capabilities on all processes within a
53810+ chroot jail will be lowered to stop module insertion, raw i/o,
53811+ system and net admin tasks, rebooting the system, modifying immutable
53812+ files, modifying IPC owned by another, and changing the system time.
53813+ This is left an option because it can break some apps. Disable this
53814+ if your chrooted apps are having problems performing those kinds of
53815+ tasks. If the sysctl option is enabled, a sysctl option with
53816+ name "chroot_caps" is created.
53817+
53818+endmenu
53819+menu "Kernel Auditing"
53820+depends on GRKERNSEC
53821+
53822+config GRKERNSEC_AUDIT_GROUP
53823+ bool "Single group for auditing"
53824+ help
53825+ If you say Y here, the exec, chdir, and (un)mount logging features
53826+ will only operate on a group you specify. This option is recommended
53827+ if you only want to watch certain users instead of having a large
53828+ amount of logs from the entire system. If the sysctl option is enabled,
53829+ a sysctl option with name "audit_group" is created.
53830+
53831+config GRKERNSEC_AUDIT_GID
53832+ int "GID for auditing"
53833+ depends on GRKERNSEC_AUDIT_GROUP
53834+ default 1007
53835+
53836+config GRKERNSEC_EXECLOG
53837+ bool "Exec logging"
53838+ help
53839+ If you say Y here, all execve() calls will be logged (since the
53840+ other exec*() calls are frontends to execve(), all execution
53841+ will be logged). Useful for shell-servers that like to keep track
53842+ of their users. If the sysctl option is enabled, a sysctl option with
53843+ name "exec_logging" is created.
53844+ WARNING: This option when enabled will produce a LOT of logs, especially
53845+ on an active system.
53846+
53847+config GRKERNSEC_RESLOG
53848+ bool "Resource logging"
53849+ help
53850+ If you say Y here, all attempts to overstep resource limits will
53851+ be logged with the resource name, the requested size, and the current
53852+ limit. It is highly recommended that you say Y here. If the sysctl
53853+ option is enabled, a sysctl option with name "resource_logging" is
53854+ created. If the RBAC system is enabled, the sysctl value is ignored.
53855+
53856+config GRKERNSEC_CHROOT_EXECLOG
53857+ bool "Log execs within chroot"
53858+ help
53859+ If you say Y here, all executions inside a chroot jail will be logged
53860+ to syslog. This can cause a large amount of logs if certain
53861+ applications (eg. djb's daemontools) are installed on the system, and
53862+ is therefore left as an option. If the sysctl option is enabled, a
53863+ sysctl option with name "chroot_execlog" is created.
53864+
53865+config GRKERNSEC_AUDIT_PTRACE
53866+ bool "Ptrace logging"
53867+ help
53868+ If you say Y here, all attempts to attach to a process via ptrace
53869+ will be logged. If the sysctl option is enabled, a sysctl option
53870+ with name "audit_ptrace" is created.
53871+
53872+config GRKERNSEC_AUDIT_CHDIR
53873+ bool "Chdir logging"
53874+ help
53875+ If you say Y here, all chdir() calls will be logged. If the sysctl
53876+ option is enabled, a sysctl option with name "audit_chdir" is created.
53877+
53878+config GRKERNSEC_AUDIT_MOUNT
53879+ bool "(Un)Mount logging"
53880+ help
53881+ If you say Y here, all mounts and unmounts will be logged. If the
53882+ sysctl option is enabled, a sysctl option with name "audit_mount" is
53883+ created.
53884+
53885+config GRKERNSEC_SIGNAL
53886+ bool "Signal logging"
53887+ help
53888+ If you say Y here, certain important signals will be logged, such as
53889+	  SIGSEGV, which will as a result inform you of when an error in a program
53890+ occurred, which in some cases could mean a possible exploit attempt.
53891+ If the sysctl option is enabled, a sysctl option with name
53892+ "signal_logging" is created.
53893+
53894+config GRKERNSEC_FORKFAIL
53895+ bool "Fork failure logging"
53896+ help
53897+ If you say Y here, all failed fork() attempts will be logged.
53898+ This could suggest a fork bomb, or someone attempting to overstep
53899+ their process limit. If the sysctl option is enabled, a sysctl option
53900+ with name "forkfail_logging" is created.
53901+
53902+config GRKERNSEC_TIME
53903+ bool "Time change logging"
53904+ help
53905+ If you say Y here, any changes of the system clock will be logged.
53906+ If the sysctl option is enabled, a sysctl option with name
53907+ "timechange_logging" is created.
53908+
53909+config GRKERNSEC_PROC_IPADDR
53910+ bool "/proc/<pid>/ipaddr support"
53911+ help
53912+ If you say Y here, a new entry will be added to each /proc/<pid>
53913+ directory that contains the IP address of the person using the task.
53914+ The IP is carried across local TCP and AF_UNIX stream sockets.
53915+ This information can be useful for IDS/IPSes to perform remote response
53916+ to a local attack. The entry is readable by only the owner of the
53917+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53918+ the RBAC system), and thus does not create privacy concerns.
53919+
53920+config GRKERNSEC_RWXMAP_LOG
53921+ bool 'Denied RWX mmap/mprotect logging'
53922+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53923+ help
53924+ If you say Y here, calls to mmap() and mprotect() with explicit
53925+ usage of PROT_WRITE and PROT_EXEC together will be logged when
53926+ denied by the PAX_MPROTECT feature. If the sysctl option is
53927+ enabled, a sysctl option with name "rwxmap_logging" is created.
53928+
53929+config GRKERNSEC_AUDIT_TEXTREL
53930+ bool 'ELF text relocations logging (READ HELP)'
53931+ depends on PAX_MPROTECT
53932+ help
53933+ If you say Y here, text relocations will be logged with the filename
53934+ of the offending library or binary. The purpose of the feature is
53935+ to help Linux distribution developers get rid of libraries and
53936+ binaries that need text relocations which hinder the future progress
53937+ of PaX. Only Linux distribution developers should say Y here, and
53938+ never on a production machine, as this option creates an information
53939+ leak that could aid an attacker in defeating the randomization of
53940+ a single memory region. If the sysctl option is enabled, a sysctl
53941+ option with name "audit_textrel" is created.
53942+
53943+endmenu
53944+
53945+menu "Executable Protections"
53946+depends on GRKERNSEC
53947+
53948+config GRKERNSEC_DMESG
53949+ bool "Dmesg(8) restriction"
53950+ help
53951+ If you say Y here, non-root users will not be able to use dmesg(8)
53952+ to view up to the last 4kb of messages in the kernel's log buffer.
53953+ The kernel's log buffer often contains kernel addresses and other
53954+ identifying information useful to an attacker in fingerprinting a
53955+ system for a targeted exploit.
53956+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
53957+ created.
53958+
53959+config GRKERNSEC_HARDEN_PTRACE
53960+ bool "Deter ptrace-based process snooping"
53961+ help
53962+ If you say Y here, TTY sniffers and other malicious monitoring
53963+ programs implemented through ptrace will be defeated. If you
53964+ have been using the RBAC system, this option has already been
53965+ enabled for several years for all users, with the ability to make
53966+ fine-grained exceptions.
53967+
53968+ This option only affects the ability of non-root users to ptrace
53969+	  processes that are not a descendant of the ptracing process.
53970+ This means that strace ./binary and gdb ./binary will still work,
53971+ but attaching to arbitrary processes will not. If the sysctl
53972+ option is enabled, a sysctl option with name "harden_ptrace" is
53973+ created.
53974+
53975+config GRKERNSEC_TPE
53976+ bool "Trusted Path Execution (TPE)"
53977+ help
53978+ If you say Y here, you will be able to choose a gid to add to the
53979+ supplementary groups of users you want to mark as "untrusted."
53980+ These users will not be able to execute any files that are not in
53981+ root-owned directories writable only by root. If the sysctl option
53982+ is enabled, a sysctl option with name "tpe" is created.
53983+
53984+config GRKERNSEC_TPE_ALL
53985+ bool "Partially restrict all non-root users"
53986+ depends on GRKERNSEC_TPE
53987+ help
53988+ If you say Y here, all non-root users will be covered under
53989+ a weaker TPE restriction. This is separate from, and in addition to,
53990+ the main TPE options that you have selected elsewhere. Thus, if a
53991+ "trusted" GID is chosen, this restriction applies to even that GID.
53992+ Under this restriction, all non-root users will only be allowed to
53993+ execute files in directories they own that are not group or
53994+ world-writable, or in directories owned by root and writable only by
53995+ root. If the sysctl option is enabled, a sysctl option with name
53996+ "tpe_restrict_all" is created.
53997+
53998+config GRKERNSEC_TPE_INVERT
53999+ bool "Invert GID option"
54000+ depends on GRKERNSEC_TPE
54001+ help
54002+ If you say Y here, the group you specify in the TPE configuration will
54003+ decide what group TPE restrictions will be *disabled* for. This
54004+ option is useful if you want TPE restrictions to be applied to most
54005+ users on the system. If the sysctl option is enabled, a sysctl option
54006+ with name "tpe_invert" is created. Unlike other sysctl options, this
54007+ entry will default to on for backward-compatibility.
54008+
54009+config GRKERNSEC_TPE_GID
54010+ int "GID for untrusted users"
54011+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
54012+ default 1005
54013+ help
54014+ Setting this GID determines what group TPE restrictions will be
54015+ *enabled* for. If the sysctl option is enabled, a sysctl option
54016+ with name "tpe_gid" is created.
54017+
54018+config GRKERNSEC_TPE_GID
54019+ int "GID for trusted users"
54020+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54021+ default 1005
54022+ help
54023+ Setting this GID determines what group TPE restrictions will be
54024+ *disabled* for. If the sysctl option is enabled, a sysctl option
54025+ with name "tpe_gid" is created.
54026+
54027+endmenu
54028+menu "Network Protections"
54029+depends on GRKERNSEC
54030+
54031+config GRKERNSEC_RANDNET
54032+ bool "Larger entropy pools"
54033+ help
54034+ If you say Y here, the entropy pools used for many features of Linux
54035+ and grsecurity will be doubled in size. Since several grsecurity
54036+ features use additional randomness, it is recommended that you say Y
54037+ here. Saying Y here has a similar effect as modifying
54038+ /proc/sys/kernel/random/poolsize.
54039+
54040+config GRKERNSEC_BLACKHOLE
54041+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54042+ depends on NET
54043+ help
54044+ If you say Y here, neither TCP resets nor ICMP
54045+ destination-unreachable packets will be sent in response to packets
54046+ sent to ports for which no associated listening process exists.
54047+ This feature supports both IPV4 and IPV6 and exempts the
54048+ loopback interface from blackholing. Enabling this feature
54049+ makes a host more resilient to DoS attacks and reduces network
54050+ visibility against scanners.
54051+
54052+ The blackhole feature as-implemented is equivalent to the FreeBSD
54053+ blackhole feature, as it prevents RST responses to all packets, not
54054+ just SYNs. Under most application behavior this causes no
54055+ problems, but applications (like haproxy) may not close certain
54056+ connections in a way that cleanly terminates them on the remote
54057+ end, leaving the remote host in LAST_ACK state. Because of this
54058+ side-effect and to prevent intentional LAST_ACK DoSes, this
54059+ feature also adds automatic mitigation against such attacks.
54060+ The mitigation drastically reduces the amount of time a socket
54061+ can spend in LAST_ACK state. If you're using haproxy and not
54062+ all servers it connects to have this option enabled, consider
54063+ disabling this feature on the haproxy host.
54064+
54065+ If the sysctl option is enabled, two sysctl options with names
54066+ "ip_blackhole" and "lastack_retries" will be created.
54067+ While "ip_blackhole" takes the standard zero/non-zero on/off
54068+ toggle, "lastack_retries" uses the same kinds of values as
54069+ "tcp_retries1" and "tcp_retries2". The default value of 4
54070+ prevents a socket from lasting more than 45 seconds in LAST_ACK
54071+ state.
54072+
54073+config GRKERNSEC_SOCKET
54074+ bool "Socket restrictions"
54075+ depends on NET
54076+ help
54077+ If you say Y here, you will be able to choose from several options.
54078+ If you assign a GID on your system and add it to the supplementary
54079+ groups of users you want to restrict socket access to, this patch
54080+ will perform up to three things, based on the option(s) you choose.
54081+
54082+config GRKERNSEC_SOCKET_ALL
54083+ bool "Deny any sockets to group"
54084+ depends on GRKERNSEC_SOCKET
54085+ help
54086+	  If you say Y here, you will be able to choose a GID whose users will
54087+ be unable to connect to other hosts from your machine or run server
54088+ applications from your machine. If the sysctl option is enabled, a
54089+ sysctl option with name "socket_all" is created.
54090+
54091+config GRKERNSEC_SOCKET_ALL_GID
54092+ int "GID to deny all sockets for"
54093+ depends on GRKERNSEC_SOCKET_ALL
54094+ default 1004
54095+ help
54096+ Here you can choose the GID to disable socket access for. Remember to
54097+ add the users you want socket access disabled for to the GID
54098+ specified here. If the sysctl option is enabled, a sysctl option
54099+ with name "socket_all_gid" is created.
54100+
54101+config GRKERNSEC_SOCKET_CLIENT
54102+ bool "Deny client sockets to group"
54103+ depends on GRKERNSEC_SOCKET
54104+ help
54105+	  If you say Y here, you will be able to choose a GID whose users will
54106+ be unable to connect to other hosts from your machine, but will be
54107+ able to run servers. If this option is enabled, all users in the group
54108+ you specify will have to use passive mode when initiating ftp transfers
54109+ from the shell on your machine. If the sysctl option is enabled, a
54110+ sysctl option with name "socket_client" is created.
54111+
54112+config GRKERNSEC_SOCKET_CLIENT_GID
54113+ int "GID to deny client sockets for"
54114+ depends on GRKERNSEC_SOCKET_CLIENT
54115+ default 1003
54116+ help
54117+ Here you can choose the GID to disable client socket access for.
54118+ Remember to add the users you want client socket access disabled for to
54119+ the GID specified here. If the sysctl option is enabled, a sysctl
54120+ option with name "socket_client_gid" is created.
54121+
54122+config GRKERNSEC_SOCKET_SERVER
54123+ bool "Deny server sockets to group"
54124+ depends on GRKERNSEC_SOCKET
54125+ help
54126+	  If you say Y here, you will be able to choose a GID whose users will
54127+ be unable to run server applications from your machine. If the sysctl
54128+ option is enabled, a sysctl option with name "socket_server" is created.
54129+
54130+config GRKERNSEC_SOCKET_SERVER_GID
54131+ int "GID to deny server sockets for"
54132+ depends on GRKERNSEC_SOCKET_SERVER
54133+ default 1002
54134+ help
54135+ Here you can choose the GID to disable server socket access for.
54136+ Remember to add the users you want server socket access disabled for to
54137+ the GID specified here. If the sysctl option is enabled, a sysctl
54138+ option with name "socket_server_gid" is created.
54139+
54140+endmenu
54141+menu "Sysctl support"
54142+depends on GRKERNSEC && SYSCTL
54143+
54144+config GRKERNSEC_SYSCTL
54145+ bool "Sysctl support"
54146+ help
54147+ If you say Y here, you will be able to change the options that
54148+ grsecurity runs with at bootup, without having to recompile your
54149+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54150+ to enable (1) or disable (0) various features. All the sysctl entries
54151+ are mutable until the "grsec_lock" entry is set to a non-zero value.
54152+ All features enabled in the kernel configuration are disabled at boot
54153+ if you do not say Y to the "Turn on features by default" option.
54154+ All options should be set at startup, and the grsec_lock entry should
54155+ be set to a non-zero value after all the options are set.
54156+ *THIS IS EXTREMELY IMPORTANT*
54157+
54158+config GRKERNSEC_SYSCTL_DISTRO
54159+ bool "Extra sysctl support for distro makers (READ HELP)"
54160+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54161+ help
54162+ If you say Y here, additional sysctl options will be created
54163+ for features that affect processes running as root. Therefore,
54164+ it is critical when using this option that the grsec_lock entry be
54165+ enabled after boot. Only distros with prebuilt kernel packages
54166+ with this option enabled that can ensure grsec_lock is enabled
54167+ after boot should use this option.
54168+ *Failure to set grsec_lock after boot makes all grsec features
54169+ this option covers useless*
54170+
54171+ Currently this option creates the following sysctl entries:
54172+ "Disable Privileged I/O": "disable_priv_io"
54173+
54174+config GRKERNSEC_SYSCTL_ON
54175+ bool "Turn on features by default"
54176+ depends on GRKERNSEC_SYSCTL
54177+ help
54178+ If you say Y here, instead of having all features enabled in the
54179+ kernel configuration disabled at boot time, the features will be
54180+ enabled at boot time. It is recommended you say Y here unless
54181+ there is some reason you would want all sysctl-tunable features to
54182+ be disabled by default. As mentioned elsewhere, it is important
54183+ to enable the grsec_lock entry once you have finished modifying
54184+ the sysctl entries.
54185+
54186+endmenu
54187+menu "Logging Options"
54188+depends on GRKERNSEC
54189+
54190+config GRKERNSEC_FLOODTIME
54191+ int "Seconds in between log messages (minimum)"
54192+ default 10
54193+ help
54194+ This option allows you to enforce the number of seconds between
54195+ grsecurity log messages. The default should be suitable for most
54196+ people, however, if you choose to change it, choose a value small enough
54197+ to allow informative logs to be produced, but large enough to
54198+ prevent flooding.
54199+
54200+config GRKERNSEC_FLOODBURST
54201+ int "Number of messages in a burst (maximum)"
54202+ default 6
54203+ help
54204+ This option allows you to choose the maximum number of messages allowed
54205+ within the flood time interval you chose in a separate option. The
54206+ default should be suitable for most people, however if you find that
54207+ many of your logs are being interpreted as flooding, you may want to
54208+ raise this value.
54209+
54210+endmenu
54211+
54212+endmenu
54213diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
54214--- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54215+++ linux-3.0.4/grsecurity/Makefile 2011-09-14 23:29:56.000000000 -0400
54216@@ -0,0 +1,35 @@
54217+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54218+# during 2001-2009 it has been completely redesigned by Brad Spengler
54219+# into an RBAC system
54220+#
54221+# All code in this directory and various hooks inserted throughout the kernel
54222+# are copyright Brad Spengler - Open Source Security, Inc., and released
54223+# under the GPL v2 or higher
54224+
54225+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54226+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
54227+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54228+
54229+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54230+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54231+ gracl_learn.o grsec_log.o
54232+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54233+
54234+ifdef CONFIG_NET
54235+obj-y += grsec_sock.o
54236+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54237+endif
54238+
54239+ifndef CONFIG_GRKERNSEC
54240+obj-y += grsec_disabled.o
54241+endif
54242+
54243+ifdef CONFIG_GRKERNSEC_HIDESYM
54244+extra-y := grsec_hidesym.o
54245+$(obj)/grsec_hidesym.o:
54246+ @-chmod -f 500 /boot
54247+ @-chmod -f 500 /lib/modules
54248+ @-chmod -f 500 /lib64/modules
54249+ @-chmod -f 700 .
54250+ @echo ' grsec: protected kernel image paths'
54251+endif
54252diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
54253--- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
54254+++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
54255@@ -107,7 +107,7 @@ struct acpi_device_ops {
54256 acpi_op_bind bind;
54257 acpi_op_unbind unbind;
54258 acpi_op_notify notify;
54259-};
54260+} __no_const;
54261
54262 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
54263
54264diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
54265--- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
54266+++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
54267@@ -22,6 +22,12 @@
54268
54269 typedef atomic64_t atomic_long_t;
54270
54271+#ifdef CONFIG_PAX_REFCOUNT
54272+typedef atomic64_unchecked_t atomic_long_unchecked_t;
54273+#else
54274+typedef atomic64_t atomic_long_unchecked_t;
54275+#endif
54276+
54277 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
54278
54279 static inline long atomic_long_read(atomic_long_t *l)
54280@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
54281 return (long)atomic64_read(v);
54282 }
54283
54284+#ifdef CONFIG_PAX_REFCOUNT
54285+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54286+{
54287+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54288+
54289+ return (long)atomic64_read_unchecked(v);
54290+}
54291+#endif
54292+
54293 static inline void atomic_long_set(atomic_long_t *l, long i)
54294 {
54295 atomic64_t *v = (atomic64_t *)l;
54296@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
54297 atomic64_set(v, i);
54298 }
54299
54300+#ifdef CONFIG_PAX_REFCOUNT
54301+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54302+{
54303+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54304+
54305+ atomic64_set_unchecked(v, i);
54306+}
54307+#endif
54308+
54309 static inline void atomic_long_inc(atomic_long_t *l)
54310 {
54311 atomic64_t *v = (atomic64_t *)l;
54312@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
54313 atomic64_inc(v);
54314 }
54315
54316+#ifdef CONFIG_PAX_REFCOUNT
54317+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54318+{
54319+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54320+
54321+ atomic64_inc_unchecked(v);
54322+}
54323+#endif
54324+
54325 static inline void atomic_long_dec(atomic_long_t *l)
54326 {
54327 atomic64_t *v = (atomic64_t *)l;
54328@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
54329 atomic64_dec(v);
54330 }
54331
54332+#ifdef CONFIG_PAX_REFCOUNT
54333+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54334+{
54335+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54336+
54337+ atomic64_dec_unchecked(v);
54338+}
54339+#endif
54340+
54341 static inline void atomic_long_add(long i, atomic_long_t *l)
54342 {
54343 atomic64_t *v = (atomic64_t *)l;
54344@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
54345 atomic64_add(i, v);
54346 }
54347
54348+#ifdef CONFIG_PAX_REFCOUNT
54349+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54350+{
54351+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54352+
54353+ atomic64_add_unchecked(i, v);
54354+}
54355+#endif
54356+
54357 static inline void atomic_long_sub(long i, atomic_long_t *l)
54358 {
54359 atomic64_t *v = (atomic64_t *)l;
54360@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
54361 atomic64_sub(i, v);
54362 }
54363
54364+#ifdef CONFIG_PAX_REFCOUNT
54365+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
54366+{
54367+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54368+
54369+ atomic64_sub_unchecked(i, v);
54370+}
54371+#endif
54372+
54373 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
54374 {
54375 atomic64_t *v = (atomic64_t *)l;
54376@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
54377 return (long)atomic64_inc_return(v);
54378 }
54379
54380+#ifdef CONFIG_PAX_REFCOUNT
54381+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54382+{
54383+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54384+
54385+ return (long)atomic64_inc_return_unchecked(v);
54386+}
54387+#endif
54388+
54389 static inline long atomic_long_dec_return(atomic_long_t *l)
54390 {
54391 atomic64_t *v = (atomic64_t *)l;
54392@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
54393
54394 typedef atomic_t atomic_long_t;
54395
54396+#ifdef CONFIG_PAX_REFCOUNT
54397+typedef atomic_unchecked_t atomic_long_unchecked_t;
54398+#else
54399+typedef atomic_t atomic_long_unchecked_t;
54400+#endif
54401+
54402 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
54403 static inline long atomic_long_read(atomic_long_t *l)
54404 {
54405@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
54406 return (long)atomic_read(v);
54407 }
54408
54409+#ifdef CONFIG_PAX_REFCOUNT
54410+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54411+{
54412+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54413+
54414+ return (long)atomic_read_unchecked(v);
54415+}
54416+#endif
54417+
54418 static inline void atomic_long_set(atomic_long_t *l, long i)
54419 {
54420 atomic_t *v = (atomic_t *)l;
54421@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
54422 atomic_set(v, i);
54423 }
54424
54425+#ifdef CONFIG_PAX_REFCOUNT
54426+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54427+{
54428+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54429+
54430+ atomic_set_unchecked(v, i);
54431+}
54432+#endif
54433+
54434 static inline void atomic_long_inc(atomic_long_t *l)
54435 {
54436 atomic_t *v = (atomic_t *)l;
54437@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
54438 atomic_inc(v);
54439 }
54440
54441+#ifdef CONFIG_PAX_REFCOUNT
54442+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54443+{
54444+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54445+
54446+ atomic_inc_unchecked(v);
54447+}
54448+#endif
54449+
54450 static inline void atomic_long_dec(atomic_long_t *l)
54451 {
54452 atomic_t *v = (atomic_t *)l;
54453@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
54454 atomic_dec(v);
54455 }
54456
54457+#ifdef CONFIG_PAX_REFCOUNT
54458+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54459+{
54460+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54461+
54462+ atomic_dec_unchecked(v);
54463+}
54464+#endif
54465+
54466 static inline void atomic_long_add(long i, atomic_long_t *l)
54467 {
54468 atomic_t *v = (atomic_t *)l;
54469@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
54470 atomic_add(i, v);
54471 }
54472
54473+#ifdef CONFIG_PAX_REFCOUNT
54474+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54475+{
54476+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54477+
54478+ atomic_add_unchecked(i, v);
54479+}
54480+#endif
54481+
54482 static inline void atomic_long_sub(long i, atomic_long_t *l)
54483 {
54484 atomic_t *v = (atomic_t *)l;
54485@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
54486 atomic_sub(i, v);
54487 }
54488
54489+#ifdef CONFIG_PAX_REFCOUNT
54490+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
54491+{
54492+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54493+
54494+ atomic_sub_unchecked(i, v);
54495+}
54496+#endif
54497+
54498 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
54499 {
54500 atomic_t *v = (atomic_t *)l;
54501@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
54502 return (long)atomic_inc_return(v);
54503 }
54504
54505+#ifdef CONFIG_PAX_REFCOUNT
54506+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54507+{
54508+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54509+
54510+ return (long)atomic_inc_return_unchecked(v);
54511+}
54512+#endif
54513+
54514 static inline long atomic_long_dec_return(atomic_long_t *l)
54515 {
54516 atomic_t *v = (atomic_t *)l;
54517@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
54518
54519 #endif /* BITS_PER_LONG == 64 */
54520
54521+#ifdef CONFIG_PAX_REFCOUNT
54522+static inline void pax_refcount_needs_these_functions(void)
54523+{
54524+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
54525+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
54526+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
54527+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
54528+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
54529+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
54530+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
54531+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
54532+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
54533+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
54534+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
54535+
54536+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
54537+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
54538+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
54539+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
54540+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
54541+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
54542+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54543+}
54544+#else
54545+#define atomic_read_unchecked(v) atomic_read(v)
54546+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54547+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54548+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54549+#define atomic_inc_unchecked(v) atomic_inc(v)
54550+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54551+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54552+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54553+#define atomic_dec_unchecked(v) atomic_dec(v)
54554+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54555+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54556+
54557+#define atomic_long_read_unchecked(v) atomic_long_read(v)
54558+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54559+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54560+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
54561+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54562+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54563+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54564+#endif
54565+
54566 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
54567diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
54568--- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
54569+++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
54570@@ -6,7 +6,7 @@
54571 * cache lines need to provide their own cache.h.
54572 */
54573
54574-#define L1_CACHE_SHIFT 5
54575-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54576+#define L1_CACHE_SHIFT 5UL
54577+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
54578
54579 #endif /* __ASM_GENERIC_CACHE_H */
54580diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
54581--- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
54582+++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
54583@@ -46,6 +46,8 @@ typedef unsigned int u32;
54584 typedef signed long s64;
54585 typedef unsigned long u64;
54586
54587+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54588+
54589 #define S8_C(x) x
54590 #define U8_C(x) x ## U
54591 #define S16_C(x) x
54592diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
54593--- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
54594+++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
54595@@ -51,6 +51,8 @@ typedef unsigned int u32;
54596 typedef signed long long s64;
54597 typedef unsigned long long u64;
54598
54599+typedef unsigned long long intoverflow_t;
54600+
54601 #define S8_C(x) x
54602 #define U8_C(x) x ## U
54603 #define S16_C(x) x
54604diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
54605--- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
54606+++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
54607@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
54608 KMAP_D(17) KM_NMI,
54609 KMAP_D(18) KM_NMI_PTE,
54610 KMAP_D(19) KM_KDB,
54611+KMAP_D(20) KM_CLEARPAGE,
54612 /*
54613 * Remember to update debug_kmap_atomic() when adding new kmap types!
54614 */
54615-KMAP_D(20) KM_TYPE_NR
54616+KMAP_D(21) KM_TYPE_NR
54617 };
54618
54619 #undef KMAP_D
54620diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
54621--- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
54622+++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
54623@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
54624 #endif /* __HAVE_ARCH_PMD_WRITE */
54625 #endif
54626
54627+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54628+static inline unsigned long pax_open_kernel(void) { return 0; }
54629+#endif
54630+
54631+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54632+static inline unsigned long pax_close_kernel(void) { return 0; }
54633+#endif
54634+
54635 #endif /* !__ASSEMBLY__ */
54636
54637 #endif /* _ASM_GENERIC_PGTABLE_H */
54638diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
54639--- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
54640+++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
54641@@ -1,14 +1,19 @@
54642 #ifndef _PGTABLE_NOPMD_H
54643 #define _PGTABLE_NOPMD_H
54644
54645-#ifndef __ASSEMBLY__
54646-
54647 #include <asm-generic/pgtable-nopud.h>
54648
54649-struct mm_struct;
54650-
54651 #define __PAGETABLE_PMD_FOLDED
54652
54653+#define PMD_SHIFT PUD_SHIFT
54654+#define PTRS_PER_PMD 1
54655+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54656+#define PMD_MASK (~(PMD_SIZE-1))
54657+
54658+#ifndef __ASSEMBLY__
54659+
54660+struct mm_struct;
54661+
54662 /*
54663 * Having the pmd type consist of a pud gets the size right, and allows
54664 * us to conceptually access the pud entry that this pmd is folded into
54665@@ -16,11 +21,6 @@ struct mm_struct;
54666 */
54667 typedef struct { pud_t pud; } pmd_t;
54668
54669-#define PMD_SHIFT PUD_SHIFT
54670-#define PTRS_PER_PMD 1
54671-#define PMD_SIZE (1UL << PMD_SHIFT)
54672-#define PMD_MASK (~(PMD_SIZE-1))
54673-
54674 /*
54675 * The "pud_xxx()" functions here are trivial for a folded two-level
54676 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54677diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
54678--- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
54679+++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
54680@@ -1,10 +1,15 @@
54681 #ifndef _PGTABLE_NOPUD_H
54682 #define _PGTABLE_NOPUD_H
54683
54684-#ifndef __ASSEMBLY__
54685-
54686 #define __PAGETABLE_PUD_FOLDED
54687
54688+#define PUD_SHIFT PGDIR_SHIFT
54689+#define PTRS_PER_PUD 1
54690+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54691+#define PUD_MASK (~(PUD_SIZE-1))
54692+
54693+#ifndef __ASSEMBLY__
54694+
54695 /*
54696 * Having the pud type consist of a pgd gets the size right, and allows
54697 * us to conceptually access the pgd entry that this pud is folded into
54698@@ -12,11 +17,6 @@
54699 */
54700 typedef struct { pgd_t pgd; } pud_t;
54701
54702-#define PUD_SHIFT PGDIR_SHIFT
54703-#define PTRS_PER_PUD 1
54704-#define PUD_SIZE (1UL << PUD_SHIFT)
54705-#define PUD_MASK (~(PUD_SIZE-1))
54706-
54707 /*
54708 * The "pgd_xxx()" functions here are trivial for a folded two-level
54709 * setup: the pud is never bad, and a pud always exists (as it's folded
54710diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
54711--- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
54712+++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
54713@@ -217,6 +217,7 @@
54714 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54715 VMLINUX_SYMBOL(__start_rodata) = .; \
54716 *(.rodata) *(.rodata.*) \
54717+ *(.data..read_only) \
54718 *(__vermagic) /* Kernel version magic */ \
54719 . = ALIGN(8); \
54720 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
54721@@ -723,17 +724,18 @@
54722 * section in the linker script will go there too. @phdr should have
54723 * a leading colon.
54724 *
54725- * Note that this macros defines __per_cpu_load as an absolute symbol.
54726+ * Note that this macros defines per_cpu_load as an absolute symbol.
54727 * If there is no need to put the percpu section at a predetermined
54728 * address, use PERCPU_SECTION.
54729 */
54730 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
54731- VMLINUX_SYMBOL(__per_cpu_load) = .; \
54732- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54733+ per_cpu_load = .; \
54734+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54735 - LOAD_OFFSET) { \
54736+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54737 PERCPU_INPUT(cacheline) \
54738 } phdr \
54739- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
54740+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
54741
54742 /**
54743 * PERCPU_SECTION - define output section for percpu area, simple version
54744diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
54745--- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
54746+++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
54747@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
54748
54749 /* disable crtc when not in use - more explicit than dpms off */
54750 void (*disable)(struct drm_crtc *crtc);
54751-};
54752+} __no_const;
54753
54754 struct drm_encoder_helper_funcs {
54755 void (*dpms)(struct drm_encoder *encoder, int mode);
54756@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
54757 struct drm_connector *connector);
54758 /* disable encoder when not in use - more explicit than dpms off */
54759 void (*disable)(struct drm_encoder *encoder);
54760-};
54761+} __no_const;
54762
54763 struct drm_connector_helper_funcs {
54764 int (*get_modes)(struct drm_connector *connector);
54765diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
54766--- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
54767+++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
54768@@ -73,6 +73,7 @@
54769 #include <linux/workqueue.h>
54770 #include <linux/poll.h>
54771 #include <asm/pgalloc.h>
54772+#include <asm/local.h>
54773 #include "drm.h"
54774
54775 #include <linux/idr.h>
54776@@ -1033,7 +1034,7 @@ struct drm_device {
54777
54778 /** \name Usage Counters */
54779 /*@{ */
54780- int open_count; /**< Outstanding files open */
54781+ local_t open_count; /**< Outstanding files open */
54782 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54783 atomic_t vma_count; /**< Outstanding vma areas open */
54784 int buf_use; /**< Buffers in use -- cannot alloc */
54785@@ -1044,7 +1045,7 @@ struct drm_device {
54786 /*@{ */
54787 unsigned long counters;
54788 enum drm_stat_type types[15];
54789- atomic_t counts[15];
54790+ atomic_unchecked_t counts[15];
54791 /*@} */
54792
54793 struct list_head filelist;
54794diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
54795--- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
54796+++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
54797@@ -47,7 +47,7 @@
54798
54799 struct ttm_mem_shrink {
54800 int (*do_shrink) (struct ttm_mem_shrink *);
54801-};
54802+} __no_const;
54803
54804 /**
54805 * struct ttm_mem_global - Global memory accounting structure.
54806diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
54807--- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
54808+++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
54809@@ -39,6 +39,14 @@ enum machine_type {
54810 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54811 };
54812
54813+/* Constants for the N_FLAGS field */
54814+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54815+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54816+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54817+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54818+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54819+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54820+
54821 #if !defined (N_MAGIC)
54822 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54823 #endif
54824diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
54825--- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
54826+++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
54827@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54828 #endif
54829
54830 struct k_atm_aal_stats {
54831-#define __HANDLE_ITEM(i) atomic_t i
54832+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54833 __AAL_STAT_ITEMS
54834 #undef __HANDLE_ITEM
54835 };
54836diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
54837--- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
54838+++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
54839@@ -88,6 +88,7 @@ struct linux_binfmt {
54840 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54841 int (*load_shlib)(struct file *);
54842 int (*core_dump)(struct coredump_params *cprm);
54843+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54844 unsigned long min_coredump; /* minimal dump size */
54845 };
54846
54847diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
54848--- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
54849+++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
54850@@ -1308,7 +1308,7 @@ struct block_device_operations {
54851 /* this callback is with swap_lock and sometimes page table lock held */
54852 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
54853 struct module *owner;
54854-};
54855+} __do_const;
54856
54857 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54858 unsigned long);
54859diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
54860--- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
54861+++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
54862@@ -161,7 +161,7 @@ struct blk_trace {
54863 struct dentry *dir;
54864 struct dentry *dropped_file;
54865 struct dentry *msg_file;
54866- atomic_t dropped;
54867+ atomic_unchecked_t dropped;
54868 };
54869
54870 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54871diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
54872--- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
54873+++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
54874@@ -42,51 +42,51 @@
54875
54876 static inline __le64 __cpu_to_le64p(const __u64 *p)
54877 {
54878- return (__force __le64)*p;
54879+ return (__force const __le64)*p;
54880 }
54881 static inline __u64 __le64_to_cpup(const __le64 *p)
54882 {
54883- return (__force __u64)*p;
54884+ return (__force const __u64)*p;
54885 }
54886 static inline __le32 __cpu_to_le32p(const __u32 *p)
54887 {
54888- return (__force __le32)*p;
54889+ return (__force const __le32)*p;
54890 }
54891 static inline __u32 __le32_to_cpup(const __le32 *p)
54892 {
54893- return (__force __u32)*p;
54894+ return (__force const __u32)*p;
54895 }
54896 static inline __le16 __cpu_to_le16p(const __u16 *p)
54897 {
54898- return (__force __le16)*p;
54899+ return (__force const __le16)*p;
54900 }
54901 static inline __u16 __le16_to_cpup(const __le16 *p)
54902 {
54903- return (__force __u16)*p;
54904+ return (__force const __u16)*p;
54905 }
54906 static inline __be64 __cpu_to_be64p(const __u64 *p)
54907 {
54908- return (__force __be64)__swab64p(p);
54909+ return (__force const __be64)__swab64p(p);
54910 }
54911 static inline __u64 __be64_to_cpup(const __be64 *p)
54912 {
54913- return __swab64p((__u64 *)p);
54914+ return __swab64p((const __u64 *)p);
54915 }
54916 static inline __be32 __cpu_to_be32p(const __u32 *p)
54917 {
54918- return (__force __be32)__swab32p(p);
54919+ return (__force const __be32)__swab32p(p);
54920 }
54921 static inline __u32 __be32_to_cpup(const __be32 *p)
54922 {
54923- return __swab32p((__u32 *)p);
54924+ return __swab32p((const __u32 *)p);
54925 }
54926 static inline __be16 __cpu_to_be16p(const __u16 *p)
54927 {
54928- return (__force __be16)__swab16p(p);
54929+ return (__force const __be16)__swab16p(p);
54930 }
54931 static inline __u16 __be16_to_cpup(const __be16 *p)
54932 {
54933- return __swab16p((__u16 *)p);
54934+ return __swab16p((const __u16 *)p);
54935 }
54936 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54937 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54938diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
54939--- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
54940+++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
54941@@ -16,6 +16,10 @@
54942 #define __read_mostly
54943 #endif
54944
54945+#ifndef __read_only
54946+#define __read_only __read_mostly
54947+#endif
54948+
54949 #ifndef ____cacheline_aligned
54950 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54951 #endif
54952diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
54953--- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
54954+++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
54955@@ -547,6 +547,9 @@ extern bool capable(int cap);
54956 extern bool ns_capable(struct user_namespace *ns, int cap);
54957 extern bool task_ns_capable(struct task_struct *t, int cap);
54958 extern bool nsown_capable(int cap);
54959+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
54960+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
54961+extern bool capable_nolog(int cap);
54962
54963 /* audit system wants to get cap info from files as well */
54964 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
54965diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
54966--- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
54967+++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
54968@@ -31,7 +31,7 @@ struct cleancache_ops {
54969 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
54970 void (*flush_inode)(int, struct cleancache_filekey);
54971 void (*flush_fs)(int);
54972-};
54973+} __no_const;
54974
54975 extern struct cleancache_ops
54976 cleancache_register_ops(struct cleancache_ops *ops);
54977diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
54978--- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
54979+++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
54980@@ -31,6 +31,12 @@
54981
54982
54983 #if __GNUC_MINOR__ >= 5
54984+
54985+#ifdef CONSTIFY_PLUGIN
54986+#define __no_const __attribute__((no_const))
54987+#define __do_const __attribute__((do_const))
54988+#endif
54989+
54990 /*
54991 * Mark a position in code as unreachable. This can be used to
54992 * suppress control flow warnings after asm blocks that transfer
54993@@ -46,6 +52,11 @@
54994 #define __noclone __attribute__((__noclone__))
54995
54996 #endif
54997+
54998+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54999+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
55000+#define __bos0(ptr) __bos((ptr), 0)
55001+#define __bos1(ptr) __bos((ptr), 1)
55002 #endif
55003
55004 #if __GNUC_MINOR__ > 0
55005diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
55006--- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
55007+++ linux-3.0.4/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
55008@@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
55009 # define __attribute_const__ /* unimplemented */
55010 #endif
55011
55012+#ifndef __no_const
55013+# define __no_const
55014+#endif
55015+
55016+#ifndef __do_const
55017+# define __do_const
55018+#endif
55019+
55020 /*
55021 * Tell gcc if a function is cold. The compiler will assume any path
55022 * directly leading to the call is unlikely.
55023@@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
55024 #define __cold
55025 #endif
55026
55027+#ifndef __alloc_size
55028+#define __alloc_size(...)
55029+#endif
55030+
55031+#ifndef __bos
55032+#define __bos(ptr, arg)
55033+#endif
55034+
55035+#ifndef __bos0
55036+#define __bos0(ptr)
55037+#endif
55038+
55039+#ifndef __bos1
55040+#define __bos1(ptr)
55041+#endif
55042+
55043 /* Simple shorthand for a section definition */
55044 #ifndef __section
55045 # define __section(S) __attribute__ ((__section__(#S)))
55046@@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
55047 * use is to mediate communication between process-level code and irq/NMI
55048 * handlers, all running on the same CPU.
55049 */
55050-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55051+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55052+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55053
55054 #endif /* __LINUX_COMPILER_H */
55055diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
55056--- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
55057+++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
55058@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
55059 * nodemask.
55060 */
55061 smp_mb();
55062- --ACCESS_ONCE(current->mems_allowed_change_disable);
55063+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
55064 }
55065
55066 static inline void set_mems_allowed(nodemask_t nodemask)
55067diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
55068--- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
55069+++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
55070@@ -361,7 +361,7 @@ struct cipher_tfm {
55071 const u8 *key, unsigned int keylen);
55072 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
55073 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
55074-};
55075+} __no_const;
55076
55077 struct hash_tfm {
55078 int (*init)(struct hash_desc *desc);
55079@@ -382,13 +382,13 @@ struct compress_tfm {
55080 int (*cot_decompress)(struct crypto_tfm *tfm,
55081 const u8 *src, unsigned int slen,
55082 u8 *dst, unsigned int *dlen);
55083-};
55084+} __no_const;
55085
55086 struct rng_tfm {
55087 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
55088 unsigned int dlen);
55089 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
55090-};
55091+} __no_const;
55092
55093 #define crt_ablkcipher crt_u.ablkcipher
55094 #define crt_aead crt_u.aead
55095diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
55096--- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
55097+++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
55098@@ -77,7 +77,7 @@ static void free(void *where)
55099 * warnings when not needed (indeed large_malloc / large_free are not
55100 * needed by inflate */
55101
55102-#define malloc(a) kmalloc(a, GFP_KERNEL)
55103+#define malloc(a) kmalloc((a), GFP_KERNEL)
55104 #define free(a) kfree(a)
55105
55106 #define large_malloc(a) vmalloc(a)
55107diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
55108--- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
55109+++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
55110@@ -50,7 +50,7 @@ struct dma_map_ops {
55111 int (*dma_supported)(struct device *dev, u64 mask);
55112 int (*set_dma_mask)(struct device *dev, u64 mask);
55113 int is_phys;
55114-};
55115+} __do_const;
55116
55117 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55118
55119diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
55120--- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
55121+++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
55122@@ -410,7 +410,7 @@ struct efivar_operations {
55123 efi_get_variable_t *get_variable;
55124 efi_get_next_variable_t *get_next_variable;
55125 efi_set_variable_t *set_variable;
55126-};
55127+} __no_const;
55128
55129 struct efivars {
55130 /*
55131diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
55132--- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
55133+++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
55134@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55135 #define PT_GNU_EH_FRAME 0x6474e550
55136
55137 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55138+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55139+
55140+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55141+
55142+/* Constants for the e_flags field */
55143+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55144+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55145+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55146+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55147+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55148+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55149
55150 /*
55151 * Extended Numbering
55152@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
55153 #define DT_DEBUG 21
55154 #define DT_TEXTREL 22
55155 #define DT_JMPREL 23
55156+#define DT_FLAGS 30
55157+ #define DF_TEXTREL 0x00000004
55158 #define DT_ENCODING 32
55159 #define OLD_DT_LOOS 0x60000000
55160 #define DT_LOOS 0x6000000d
55161@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
55162 #define PF_W 0x2
55163 #define PF_X 0x1
55164
55165+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55166+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55167+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55168+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55169+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55170+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55171+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55172+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55173+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55174+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55175+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55176+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55177+
55178 typedef struct elf32_phdr{
55179 Elf32_Word p_type;
55180 Elf32_Off p_offset;
55181@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
55182 #define EI_OSABI 7
55183 #define EI_PAD 8
55184
55185+#define EI_PAX 14
55186+
55187 #define ELFMAG0 0x7f /* EI_MAG */
55188 #define ELFMAG1 'E'
55189 #define ELFMAG2 'L'
55190@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
55191 #define elf_note elf32_note
55192 #define elf_addr_t Elf32_Off
55193 #define Elf_Half Elf32_Half
55194+#define elf_dyn Elf32_Dyn
55195
55196 #else
55197
55198@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
55199 #define elf_note elf64_note
55200 #define elf_addr_t Elf64_Off
55201 #define Elf_Half Elf64_Half
55202+#define elf_dyn Elf64_Dyn
55203
55204 #endif
55205
55206diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
55207--- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
55208+++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
55209@@ -428,7 +428,7 @@ struct fw_iso_context {
55210 union {
55211 fw_iso_callback_t sc;
55212 fw_iso_mc_callback_t mc;
55213- } callback;
55214+ } __no_const callback;
55215 void *callback_data;
55216 };
55217
55218diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
55219--- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
55220+++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
55221@@ -102,7 +102,7 @@ struct fscache_operation {
55222 fscache_operation_release_t release;
55223 };
55224
55225-extern atomic_t fscache_op_debug_id;
55226+extern atomic_unchecked_t fscache_op_debug_id;
55227 extern void fscache_op_work_func(struct work_struct *work);
55228
55229 extern void fscache_enqueue_operation(struct fscache_operation *);
55230@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
55231 {
55232 INIT_WORK(&op->work, fscache_op_work_func);
55233 atomic_set(&op->usage, 1);
55234- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
55235+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55236 op->processor = processor;
55237 op->release = release;
55238 INIT_LIST_HEAD(&op->pend_link);
55239diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
55240--- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
55241+++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
55242@@ -109,6 +109,11 @@ struct inodes_stat_t {
55243 /* File was opened by fanotify and shouldn't generate fanotify events */
55244 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
55245
55246+/* Hack for grsec so as not to require read permission simply to execute
55247+ * a binary
55248+ */
55249+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
55250+
55251 /*
55252 * The below are the various read and write types that we support. Some of
55253 * them include behavioral modifiers that send information down to the
55254@@ -1571,7 +1576,8 @@ struct file_operations {
55255 int (*setlease)(struct file *, long, struct file_lock **);
55256 long (*fallocate)(struct file *file, int mode, loff_t offset,
55257 loff_t len);
55258-};
55259+} __do_const;
55260+typedef struct file_operations __no_const file_operations_no_const;
55261
55262 #define IPERM_FLAG_RCU 0x0001
55263
55264diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
55265--- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
55266+++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
55267@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
55268 */
55269 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
55270 {
55271- return kstrdup(name, GFP_KERNEL);
55272+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
55273 }
55274
55275 /*
55276diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
55277--- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
55278+++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
55279@@ -6,7 +6,7 @@
55280 #include <linux/seqlock.h>
55281
55282 struct fs_struct {
55283- int users;
55284+ atomic_t users;
55285 spinlock_t lock;
55286 seqcount_t seq;
55287 int umask;
55288diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
55289--- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
55290+++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
55291@@ -96,7 +96,7 @@ struct trace_event_functions {
55292 trace_print_func raw;
55293 trace_print_func hex;
55294 trace_print_func binary;
55295-};
55296+} __no_const;
55297
55298 struct trace_event {
55299 struct hlist_node node;
55300@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
55301 extern int trace_add_event_call(struct ftrace_event_call *call);
55302 extern void trace_remove_event_call(struct ftrace_event_call *call);
55303
55304-#define is_signed_type(type) (((type)(-1)) < 0)
55305+#define is_signed_type(type) (((type)(-1)) < (type)1)
55306
55307 int trace_set_clr_event(const char *system, const char *event, int set);
55308
55309diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
55310--- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
55311+++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
55312@@ -184,7 +184,7 @@ struct gendisk {
55313 struct kobject *slave_dir;
55314
55315 struct timer_rand_state *random;
55316- atomic_t sync_io; /* RAID */
55317+ atomic_unchecked_t sync_io; /* RAID */
55318 struct disk_events *ev;
55319 #ifdef CONFIG_BLK_DEV_INTEGRITY
55320 struct blk_integrity *integrity;
55321diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
55322--- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55323+++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
55324@@ -0,0 +1,317 @@
55325+#ifndef GR_ACL_H
55326+#define GR_ACL_H
55327+
55328+#include <linux/grdefs.h>
55329+#include <linux/resource.h>
55330+#include <linux/capability.h>
55331+#include <linux/dcache.h>
55332+#include <asm/resource.h>
55333+
55334+/* Major status information */
55335+
55336+#define GR_VERSION "grsecurity 2.2.2"
55337+#define GRSECURITY_VERSION 0x2202
55338+
55339+enum {
55340+ GR_SHUTDOWN = 0,
55341+ GR_ENABLE = 1,
55342+ GR_SPROLE = 2,
55343+ GR_RELOAD = 3,
55344+ GR_SEGVMOD = 4,
55345+ GR_STATUS = 5,
55346+ GR_UNSPROLE = 6,
55347+ GR_PASSSET = 7,
55348+ GR_SPROLEPAM = 8,
55349+};
55350+
55351+/* Password setup definitions
55352+ * kernel/grhash.c */
55353+enum {
55354+ GR_PW_LEN = 128,
55355+ GR_SALT_LEN = 16,
55356+ GR_SHA_LEN = 32,
55357+};
55358+
55359+enum {
55360+ GR_SPROLE_LEN = 64,
55361+};
55362+
55363+enum {
55364+ GR_NO_GLOB = 0,
55365+ GR_REG_GLOB,
55366+ GR_CREATE_GLOB
55367+};
55368+
55369+#define GR_NLIMITS 32
55370+
55371+/* Begin Data Structures */
55372+
55373+struct sprole_pw {
55374+ unsigned char *rolename;
55375+ unsigned char salt[GR_SALT_LEN];
55376+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55377+};
55378+
55379+struct name_entry {
55380+ __u32 key;
55381+ ino_t inode;
55382+ dev_t device;
55383+ char *name;
55384+ __u16 len;
55385+ __u8 deleted;
55386+ struct name_entry *prev;
55387+ struct name_entry *next;
55388+};
55389+
55390+struct inodev_entry {
55391+ struct name_entry *nentry;
55392+ struct inodev_entry *prev;
55393+ struct inodev_entry *next;
55394+};
55395+
55396+struct acl_role_db {
55397+ struct acl_role_label **r_hash;
55398+ __u32 r_size;
55399+};
55400+
55401+struct inodev_db {
55402+ struct inodev_entry **i_hash;
55403+ __u32 i_size;
55404+};
55405+
55406+struct name_db {
55407+ struct name_entry **n_hash;
55408+ __u32 n_size;
55409+};
55410+
55411+struct crash_uid {
55412+ uid_t uid;
55413+ unsigned long expires;
55414+};
55415+
55416+struct gr_hash_struct {
55417+ void **table;
55418+ void **nametable;
55419+ void *first;
55420+ __u32 table_size;
55421+ __u32 used_size;
55422+ int type;
55423+};
55424+
55425+/* Userspace Grsecurity ACL data structures */
55426+
55427+struct acl_subject_label {
55428+ char *filename;
55429+ ino_t inode;
55430+ dev_t device;
55431+ __u32 mode;
55432+ kernel_cap_t cap_mask;
55433+ kernel_cap_t cap_lower;
55434+ kernel_cap_t cap_invert_audit;
55435+
55436+ struct rlimit res[GR_NLIMITS];
55437+ __u32 resmask;
55438+
55439+ __u8 user_trans_type;
55440+ __u8 group_trans_type;
55441+ uid_t *user_transitions;
55442+ gid_t *group_transitions;
55443+ __u16 user_trans_num;
55444+ __u16 group_trans_num;
55445+
55446+ __u32 sock_families[2];
55447+ __u32 ip_proto[8];
55448+ __u32 ip_type;
55449+ struct acl_ip_label **ips;
55450+ __u32 ip_num;
55451+ __u32 inaddr_any_override;
55452+
55453+ __u32 crashes;
55454+ unsigned long expires;
55455+
55456+ struct acl_subject_label *parent_subject;
55457+ struct gr_hash_struct *hash;
55458+ struct acl_subject_label *prev;
55459+ struct acl_subject_label *next;
55460+
55461+ struct acl_object_label **obj_hash;
55462+ __u32 obj_hash_size;
55463+ __u16 pax_flags;
55464+};
55465+
55466+struct role_allowed_ip {
55467+ __u32 addr;
55468+ __u32 netmask;
55469+
55470+ struct role_allowed_ip *prev;
55471+ struct role_allowed_ip *next;
55472+};
55473+
55474+struct role_transition {
55475+ char *rolename;
55476+
55477+ struct role_transition *prev;
55478+ struct role_transition *next;
55479+};
55480+
55481+struct acl_role_label {
55482+ char *rolename;
55483+ uid_t uidgid;
55484+ __u16 roletype;
55485+
55486+ __u16 auth_attempts;
55487+ unsigned long expires;
55488+
55489+ struct acl_subject_label *root_label;
55490+ struct gr_hash_struct *hash;
55491+
55492+ struct acl_role_label *prev;
55493+ struct acl_role_label *next;
55494+
55495+ struct role_transition *transitions;
55496+ struct role_allowed_ip *allowed_ips;
55497+ uid_t *domain_children;
55498+ __u16 domain_child_num;
55499+
55500+ struct acl_subject_label **subj_hash;
55501+ __u32 subj_hash_size;
55502+};
55503+
55504+struct user_acl_role_db {
55505+ struct acl_role_label **r_table;
55506+ __u32 num_pointers; /* Number of allocations to track */
55507+ __u32 num_roles; /* Number of roles */
55508+ __u32 num_domain_children; /* Number of domain children */
55509+ __u32 num_subjects; /* Number of subjects */
55510+ __u32 num_objects; /* Number of objects */
55511+};
55512+
55513+struct acl_object_label {
55514+ char *filename;
55515+ ino_t inode;
55516+ dev_t device;
55517+ __u32 mode;
55518+
55519+ struct acl_subject_label *nested;
55520+ struct acl_object_label *globbed;
55521+
55522+ /* next two structures not used */
55523+
55524+ struct acl_object_label *prev;
55525+ struct acl_object_label *next;
55526+};
55527+
55528+struct acl_ip_label {
55529+ char *iface;
55530+ __u32 addr;
55531+ __u32 netmask;
55532+ __u16 low, high;
55533+ __u8 mode;
55534+ __u32 type;
55535+ __u32 proto[8];
55536+
55537+ /* next two structures not used */
55538+
55539+ struct acl_ip_label *prev;
55540+ struct acl_ip_label *next;
55541+};
55542+
55543+struct gr_arg {
55544+ struct user_acl_role_db role_db;
55545+ unsigned char pw[GR_PW_LEN];
55546+ unsigned char salt[GR_SALT_LEN];
55547+ unsigned char sum[GR_SHA_LEN];
55548+ unsigned char sp_role[GR_SPROLE_LEN];
55549+ struct sprole_pw *sprole_pws;
55550+ dev_t segv_device;
55551+ ino_t segv_inode;
55552+ uid_t segv_uid;
55553+ __u16 num_sprole_pws;
55554+ __u16 mode;
55555+};
55556+
55557+struct gr_arg_wrapper {
55558+ struct gr_arg *arg;
55559+ __u32 version;
55560+ __u32 size;
55561+};
55562+
55563+struct subject_map {
55564+ struct acl_subject_label *user;
55565+ struct acl_subject_label *kernel;
55566+ struct subject_map *prev;
55567+ struct subject_map *next;
55568+};
55569+
55570+struct acl_subj_map_db {
55571+ struct subject_map **s_hash;
55572+ __u32 s_size;
55573+};
55574+
55575+/* End Data Structures Section */
55576+
55577+/* Hash functions generated by empirical testing by Brad Spengler
55578+ Makes good use of the low bits of the inode. Generally 0-1 times
55579+ in loop for successful match. 0-3 for unsuccessful match.
55580+ Shift/add algorithm with modulus of table size and an XOR*/
55581+
55582+static __inline__ unsigned int
55583+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55584+{
55585+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
55586+}
55587+
55588+ static __inline__ unsigned int
55589+shash(const struct acl_subject_label *userp, const unsigned int sz)
55590+{
55591+ return ((const unsigned long)userp % sz);
55592+}
55593+
55594+static __inline__ unsigned int
55595+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55596+{
55597+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55598+}
55599+
55600+static __inline__ unsigned int
55601+nhash(const char *name, const __u16 len, const unsigned int sz)
55602+{
55603+ return full_name_hash((const unsigned char *)name, len) % sz;
55604+}
55605+
55606+#define FOR_EACH_ROLE_START(role) \
55607+ role = role_list; \
55608+ while (role) {
55609+
55610+#define FOR_EACH_ROLE_END(role) \
55611+ role = role->prev; \
55612+ }
55613+
55614+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55615+ subj = NULL; \
55616+ iter = 0; \
55617+ while (iter < role->subj_hash_size) { \
55618+ if (subj == NULL) \
55619+ subj = role->subj_hash[iter]; \
55620+ if (subj == NULL) { \
55621+ iter++; \
55622+ continue; \
55623+ }
55624+
55625+#define FOR_EACH_SUBJECT_END(subj,iter) \
55626+ subj = subj->next; \
55627+ if (subj == NULL) \
55628+ iter++; \
55629+ }
55630+
55631+
55632+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55633+ subj = role->hash->first; \
55634+ while (subj != NULL) {
55635+
55636+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55637+ subj = subj->next; \
55638+ }
55639+
55640+#endif
55641+
55642diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
55643--- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55644+++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
55645@@ -0,0 +1,9 @@
55646+#ifndef __GRALLOC_H
55647+#define __GRALLOC_H
55648+
55649+void acl_free_all(void);
55650+int acl_alloc_stack_init(unsigned long size);
55651+void *acl_alloc(unsigned long len);
55652+void *acl_alloc_num(unsigned long num, unsigned long len);
55653+
55654+#endif
55655diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
55656--- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55657+++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
55658@@ -0,0 +1,140 @@
55659+#ifndef GRDEFS_H
55660+#define GRDEFS_H
55661+
55662+/* Begin grsecurity status declarations */
55663+
55664+enum {
55665+ GR_READY = 0x01,
55666+ GR_STATUS_INIT = 0x00 // disabled state
55667+};
55668+
55669+/* Begin ACL declarations */
55670+
55671+/* Role flags */
55672+
55673+enum {
55674+ GR_ROLE_USER = 0x0001,
55675+ GR_ROLE_GROUP = 0x0002,
55676+ GR_ROLE_DEFAULT = 0x0004,
55677+ GR_ROLE_SPECIAL = 0x0008,
55678+ GR_ROLE_AUTH = 0x0010,
55679+ GR_ROLE_NOPW = 0x0020,
55680+ GR_ROLE_GOD = 0x0040,
55681+ GR_ROLE_LEARN = 0x0080,
55682+ GR_ROLE_TPE = 0x0100,
55683+ GR_ROLE_DOMAIN = 0x0200,
55684+ GR_ROLE_PAM = 0x0400,
55685+ GR_ROLE_PERSIST = 0x0800
55686+};
55687+
55688+/* ACL Subject and Object mode flags */
55689+enum {
55690+ GR_DELETED = 0x80000000
55691+};
55692+
55693+/* ACL Object-only mode flags */
55694+enum {
55695+ GR_READ = 0x00000001,
55696+ GR_APPEND = 0x00000002,
55697+ GR_WRITE = 0x00000004,
55698+ GR_EXEC = 0x00000008,
55699+ GR_FIND = 0x00000010,
55700+ GR_INHERIT = 0x00000020,
55701+ GR_SETID = 0x00000040,
55702+ GR_CREATE = 0x00000080,
55703+ GR_DELETE = 0x00000100,
55704+ GR_LINK = 0x00000200,
55705+ GR_AUDIT_READ = 0x00000400,
55706+ GR_AUDIT_APPEND = 0x00000800,
55707+ GR_AUDIT_WRITE = 0x00001000,
55708+ GR_AUDIT_EXEC = 0x00002000,
55709+ GR_AUDIT_FIND = 0x00004000,
55710+ GR_AUDIT_INHERIT= 0x00008000,
55711+ GR_AUDIT_SETID = 0x00010000,
55712+ GR_AUDIT_CREATE = 0x00020000,
55713+ GR_AUDIT_DELETE = 0x00040000,
55714+ GR_AUDIT_LINK = 0x00080000,
55715+ GR_PTRACERD = 0x00100000,
55716+ GR_NOPTRACE = 0x00200000,
55717+ GR_SUPPRESS = 0x00400000,
55718+ GR_NOLEARN = 0x00800000,
55719+ GR_INIT_TRANSFER= 0x01000000
55720+};
55721+
55722+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55723+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55724+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55725+
55726+/* ACL subject-only mode flags */
55727+enum {
55728+ GR_KILL = 0x00000001,
55729+ GR_VIEW = 0x00000002,
55730+ GR_PROTECTED = 0x00000004,
55731+ GR_LEARN = 0x00000008,
55732+ GR_OVERRIDE = 0x00000010,
55733+ /* just a placeholder, this mode is only used in userspace */
55734+ GR_DUMMY = 0x00000020,
55735+ GR_PROTSHM = 0x00000040,
55736+ GR_KILLPROC = 0x00000080,
55737+ GR_KILLIPPROC = 0x00000100,
55738+ /* just a placeholder, this mode is only used in userspace */
55739+ GR_NOTROJAN = 0x00000200,
55740+ GR_PROTPROCFD = 0x00000400,
55741+ GR_PROCACCT = 0x00000800,
55742+ GR_RELAXPTRACE = 0x00001000,
55743+ GR_NESTED = 0x00002000,
55744+ GR_INHERITLEARN = 0x00004000,
55745+ GR_PROCFIND = 0x00008000,
55746+ GR_POVERRIDE = 0x00010000,
55747+ GR_KERNELAUTH = 0x00020000,
55748+ GR_ATSECURE = 0x00040000,
55749+ GR_SHMEXEC = 0x00080000
55750+};
55751+
55752+enum {
55753+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55754+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55755+ GR_PAX_ENABLE_MPROTECT = 0x0004,
55756+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
55757+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55758+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55759+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55760+ GR_PAX_DISABLE_MPROTECT = 0x0400,
55761+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
55762+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55763+};
55764+
55765+enum {
55766+ GR_ID_USER = 0x01,
55767+ GR_ID_GROUP = 0x02,
55768+};
55769+
55770+enum {
55771+ GR_ID_ALLOW = 0x01,
55772+ GR_ID_DENY = 0x02,
55773+};
55774+
55775+#define GR_CRASH_RES 31
55776+#define GR_UIDTABLE_MAX 500
55777+
55778+/* begin resource learning section */
55779+enum {
55780+ GR_RLIM_CPU_BUMP = 60,
55781+ GR_RLIM_FSIZE_BUMP = 50000,
55782+ GR_RLIM_DATA_BUMP = 10000,
55783+ GR_RLIM_STACK_BUMP = 1000,
55784+ GR_RLIM_CORE_BUMP = 10000,
55785+ GR_RLIM_RSS_BUMP = 500000,
55786+ GR_RLIM_NPROC_BUMP = 1,
55787+ GR_RLIM_NOFILE_BUMP = 5,
55788+ GR_RLIM_MEMLOCK_BUMP = 50000,
55789+ GR_RLIM_AS_BUMP = 500000,
55790+ GR_RLIM_LOCKS_BUMP = 2,
55791+ GR_RLIM_SIGPENDING_BUMP = 5,
55792+ GR_RLIM_MSGQUEUE_BUMP = 10000,
55793+ GR_RLIM_NICE_BUMP = 1,
55794+ GR_RLIM_RTPRIO_BUMP = 1,
55795+ GR_RLIM_RTTIME_BUMP = 1000000
55796+};
55797+
55798+#endif
55799diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
55800--- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55801+++ linux-3.0.4/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
55802@@ -0,0 +1,219 @@
55803+#ifndef __GRINTERNAL_H
55804+#define __GRINTERNAL_H
55805+
55806+#ifdef CONFIG_GRKERNSEC
55807+
55808+#include <linux/fs.h>
55809+#include <linux/mnt_namespace.h>
55810+#include <linux/nsproxy.h>
55811+#include <linux/gracl.h>
55812+#include <linux/grdefs.h>
55813+#include <linux/grmsg.h>
55814+
55815+void gr_add_learn_entry(const char *fmt, ...)
55816+ __attribute__ ((format (printf, 1, 2)));
55817+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55818+ const struct vfsmount *mnt);
55819+__u32 gr_check_create(const struct dentry *new_dentry,
55820+ const struct dentry *parent,
55821+ const struct vfsmount *mnt, const __u32 mode);
55822+int gr_check_protected_task(const struct task_struct *task);
55823+__u32 to_gr_audit(const __u32 reqmode);
55824+int gr_set_acls(const int type);
55825+int gr_apply_subject_to_task(struct task_struct *task);
55826+int gr_acl_is_enabled(void);
55827+char gr_roletype_to_char(void);
55828+
55829+void gr_handle_alertkill(struct task_struct *task);
55830+char *gr_to_filename(const struct dentry *dentry,
55831+ const struct vfsmount *mnt);
55832+char *gr_to_filename1(const struct dentry *dentry,
55833+ const struct vfsmount *mnt);
55834+char *gr_to_filename2(const struct dentry *dentry,
55835+ const struct vfsmount *mnt);
55836+char *gr_to_filename3(const struct dentry *dentry,
55837+ const struct vfsmount *mnt);
55838+
55839+extern int grsec_enable_harden_ptrace;
55840+extern int grsec_enable_link;
55841+extern int grsec_enable_fifo;
55842+extern int grsec_enable_execve;
55843+extern int grsec_enable_shm;
55844+extern int grsec_enable_execlog;
55845+extern int grsec_enable_signal;
55846+extern int grsec_enable_audit_ptrace;
55847+extern int grsec_enable_forkfail;
55848+extern int grsec_enable_time;
55849+extern int grsec_enable_rofs;
55850+extern int grsec_enable_chroot_shmat;
55851+extern int grsec_enable_chroot_mount;
55852+extern int grsec_enable_chroot_double;
55853+extern int grsec_enable_chroot_pivot;
55854+extern int grsec_enable_chroot_chdir;
55855+extern int grsec_enable_chroot_chmod;
55856+extern int grsec_enable_chroot_mknod;
55857+extern int grsec_enable_chroot_fchdir;
55858+extern int grsec_enable_chroot_nice;
55859+extern int grsec_enable_chroot_execlog;
55860+extern int grsec_enable_chroot_caps;
55861+extern int grsec_enable_chroot_sysctl;
55862+extern int grsec_enable_chroot_unix;
55863+extern int grsec_enable_tpe;
55864+extern int grsec_tpe_gid;
55865+extern int grsec_enable_tpe_all;
55866+extern int grsec_enable_tpe_invert;
55867+extern int grsec_enable_socket_all;
55868+extern int grsec_socket_all_gid;
55869+extern int grsec_enable_socket_client;
55870+extern int grsec_socket_client_gid;
55871+extern int grsec_enable_socket_server;
55872+extern int grsec_socket_server_gid;
55873+extern int grsec_audit_gid;
55874+extern int grsec_enable_group;
55875+extern int grsec_enable_audit_textrel;
55876+extern int grsec_enable_log_rwxmaps;
55877+extern int grsec_enable_mount;
55878+extern int grsec_enable_chdir;
55879+extern int grsec_resource_logging;
55880+extern int grsec_enable_blackhole;
55881+extern int grsec_lastack_retries;
55882+extern int grsec_enable_brute;
55883+extern int grsec_lock;
55884+
55885+extern spinlock_t grsec_alert_lock;
55886+extern unsigned long grsec_alert_wtime;
55887+extern unsigned long grsec_alert_fyet;
55888+
55889+extern spinlock_t grsec_audit_lock;
55890+
55891+extern rwlock_t grsec_exec_file_lock;
55892+
55893+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55894+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55895+ (tsk)->exec_file->f_vfsmnt) : "/")
55896+
55897+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55898+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55899+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55900+
55901+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55902+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
55903+ (tsk)->exec_file->f_vfsmnt) : "/")
55904+
55905+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55906+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55907+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55908+
55909+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55910+
55911+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55912+
55913+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55914+ (task)->pid, (cred)->uid, \
55915+ (cred)->euid, (cred)->gid, (cred)->egid, \
55916+ gr_parent_task_fullpath(task), \
55917+ (task)->real_parent->comm, (task)->real_parent->pid, \
55918+ (pcred)->uid, (pcred)->euid, \
55919+ (pcred)->gid, (pcred)->egid
55920+
55921+#define GR_CHROOT_CAPS {{ \
55922+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55923+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55924+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55925+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55926+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55927+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55928+
55929+#define security_learn(normal_msg,args...) \
55930+({ \
55931+ read_lock(&grsec_exec_file_lock); \
55932+ gr_add_learn_entry(normal_msg "\n", ## args); \
55933+ read_unlock(&grsec_exec_file_lock); \
55934+})
55935+
55936+enum {
55937+ GR_DO_AUDIT,
55938+ GR_DONT_AUDIT,
55939+ /* used for non-audit messages that we shouldn't kill the task on */
55940+ GR_DONT_AUDIT_GOOD
55941+};
55942+
55943+enum {
55944+ GR_TTYSNIFF,
55945+ GR_RBAC,
55946+ GR_RBAC_STR,
55947+ GR_STR_RBAC,
55948+ GR_RBAC_MODE2,
55949+ GR_RBAC_MODE3,
55950+ GR_FILENAME,
55951+ GR_SYSCTL_HIDDEN,
55952+ GR_NOARGS,
55953+ GR_ONE_INT,
55954+ GR_ONE_INT_TWO_STR,
55955+ GR_ONE_STR,
55956+ GR_STR_INT,
55957+ GR_TWO_STR_INT,
55958+ GR_TWO_INT,
55959+ GR_TWO_U64,
55960+ GR_THREE_INT,
55961+ GR_FIVE_INT_TWO_STR,
55962+ GR_TWO_STR,
55963+ GR_THREE_STR,
55964+ GR_FOUR_STR,
55965+ GR_STR_FILENAME,
55966+ GR_FILENAME_STR,
55967+ GR_FILENAME_TWO_INT,
55968+ GR_FILENAME_TWO_INT_STR,
55969+ GR_TEXTREL,
55970+ GR_PTRACE,
55971+ GR_RESOURCE,
55972+ GR_CAP,
55973+ GR_SIG,
55974+ GR_SIG2,
55975+ GR_CRASH1,
55976+ GR_CRASH2,
55977+ GR_PSACCT,
55978+ GR_RWXMAP
55979+};
55980+
55981+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55982+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55983+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55984+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55985+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55986+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55987+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55988+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55989+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55990+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55991+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55992+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55993+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55994+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55995+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55996+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55997+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55998+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55999+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
56000+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
56001+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
56002+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
56003+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
56004+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
56005+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
56006+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
56007+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
56008+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
56009+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
56010+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
56011+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
56012+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
56013+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56014+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56015+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56016+
56017+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56018+
56019+#endif
56020+
56021+#endif
56022diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
56023--- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56024+++ linux-3.0.4/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
56025@@ -0,0 +1,108 @@
56026+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56027+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56028+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56029+#define GR_STOPMOD_MSG "denied modification of module state by "
56030+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56031+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56032+#define GR_IOPERM_MSG "denied use of ioperm() by "
56033+#define GR_IOPL_MSG "denied use of iopl() by "
56034+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56035+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56036+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56037+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56038+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56039+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56040+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56041+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56042+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56043+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56044+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56045+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56046+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56047+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56048+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56049+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56050+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56051+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56052+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56053+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56054+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56055+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56056+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56057+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56058+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56059+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56060+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56061+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56062+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56063+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56064+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56065+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56066+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56067+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56068+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56069+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56070+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56071+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56072+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56073+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56074+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56075+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56076+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56077+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56078+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
56079+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56080+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56081+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56082+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56083+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56084+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56085+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56086+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56087+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56088+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56089+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56090+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56091+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56092+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56093+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56094+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56095+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56096+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56097+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56098+#define GR_FAILFORK_MSG "failed fork with errno %s by "
56099+#define GR_NICE_CHROOT_MSG "denied priority change by "
56100+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56101+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56102+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56103+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56104+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56105+#define GR_TIME_MSG "time set by "
56106+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56107+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56108+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56109+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56110+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56111+#define GR_BIND_MSG "denied bind() by "
56112+#define GR_CONNECT_MSG "denied connect() by "
56113+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56114+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56115+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56116+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56117+#define GR_CAP_ACL_MSG "use of %s denied for "
56118+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
56119+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56120+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56121+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56122+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56123+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56124+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56125+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56126+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56127+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56128+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56129+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56130+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56131+#define GR_VM86_MSG "denied use of vm86 by "
56132+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56133+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
56134diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
56135--- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56136+++ linux-3.0.4/include/linux/grsecurity.h 2011-09-14 09:16:54.000000000 -0400
56137@@ -0,0 +1,226 @@
56138+#ifndef GR_SECURITY_H
56139+#define GR_SECURITY_H
56140+#include <linux/fs.h>
56141+#include <linux/fs_struct.h>
56142+#include <linux/binfmts.h>
56143+#include <linux/gracl.h>
56144+
56145+/* notify of brain-dead configs */
56146+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56147+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
56148+#endif
56149+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56150+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56151+#endif
56152+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56153+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56154+#endif
56155+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56156+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56157+#endif
56158+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56159+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56160+#endif
56161+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56162+#error "CONFIG_PAX enabled, but no PaX options are enabled."
56163+#endif
56164+
56165+#include <linux/compat.h>
56166+
56167+struct user_arg_ptr {
56168+#ifdef CONFIG_COMPAT
56169+ bool is_compat;
56170+#endif
56171+ union {
56172+ const char __user *const __user *native;
56173+#ifdef CONFIG_COMPAT
56174+ compat_uptr_t __user *compat;
56175+#endif
56176+ } ptr;
56177+};
56178+
56179+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56180+void gr_handle_brute_check(void);
56181+void gr_handle_kernel_exploit(void);
56182+int gr_process_user_ban(void);
56183+
56184+char gr_roletype_to_char(void);
56185+
56186+int gr_acl_enable_at_secure(void);
56187+
56188+int gr_check_user_change(int real, int effective, int fs);
56189+int gr_check_group_change(int real, int effective, int fs);
56190+
56191+void gr_del_task_from_ip_table(struct task_struct *p);
56192+
56193+int gr_pid_is_chrooted(struct task_struct *p);
56194+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
56195+int gr_handle_chroot_nice(void);
56196+int gr_handle_chroot_sysctl(const int op);
56197+int gr_handle_chroot_setpriority(struct task_struct *p,
56198+ const int niceval);
56199+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
56200+int gr_handle_chroot_chroot(const struct dentry *dentry,
56201+ const struct vfsmount *mnt);
56202+void gr_handle_chroot_chdir(struct path *path);
56203+int gr_handle_chroot_chmod(const struct dentry *dentry,
56204+ const struct vfsmount *mnt, const int mode);
56205+int gr_handle_chroot_mknod(const struct dentry *dentry,
56206+ const struct vfsmount *mnt, const int mode);
56207+int gr_handle_chroot_mount(const struct dentry *dentry,
56208+ const struct vfsmount *mnt,
56209+ const char *dev_name);
56210+int gr_handle_chroot_pivot(void);
56211+int gr_handle_chroot_unix(const pid_t pid);
56212+
56213+int gr_handle_rawio(const struct inode *inode);
56214+
56215+void gr_handle_ioperm(void);
56216+void gr_handle_iopl(void);
56217+
56218+int gr_tpe_allow(const struct file *file);
56219+
56220+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
56221+void gr_clear_chroot_entries(struct task_struct *task);
56222+
56223+void gr_log_forkfail(const int retval);
56224+void gr_log_timechange(void);
56225+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
56226+void gr_log_chdir(const struct dentry *dentry,
56227+ const struct vfsmount *mnt);
56228+void gr_log_chroot_exec(const struct dentry *dentry,
56229+ const struct vfsmount *mnt);
56230+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
56231+void gr_log_remount(const char *devname, const int retval);
56232+void gr_log_unmount(const char *devname, const int retval);
56233+void gr_log_mount(const char *from, const char *to, const int retval);
56234+void gr_log_textrel(struct vm_area_struct *vma);
56235+void gr_log_rwxmmap(struct file *file);
56236+void gr_log_rwxmprotect(struct file *file);
56237+
56238+int gr_handle_follow_link(const struct inode *parent,
56239+ const struct inode *inode,
56240+ const struct dentry *dentry,
56241+ const struct vfsmount *mnt);
56242+int gr_handle_fifo(const struct dentry *dentry,
56243+ const struct vfsmount *mnt,
56244+ const struct dentry *dir, const int flag,
56245+ const int acc_mode);
56246+int gr_handle_hardlink(const struct dentry *dentry,
56247+ const struct vfsmount *mnt,
56248+ struct inode *inode,
56249+ const int mode, const char *to);
56250+
56251+int gr_is_capable(const int cap);
56252+int gr_is_capable_nolog(const int cap);
56253+void gr_learn_resource(const struct task_struct *task, const int limit,
56254+ const unsigned long wanted, const int gt);
56255+void gr_copy_label(struct task_struct *tsk);
56256+void gr_handle_crash(struct task_struct *task, const int sig);
56257+int gr_handle_signal(const struct task_struct *p, const int sig);
56258+int gr_check_crash_uid(const uid_t uid);
56259+int gr_check_protected_task(const struct task_struct *task);
56260+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
56261+int gr_acl_handle_mmap(const struct file *file,
56262+ const unsigned long prot);
56263+int gr_acl_handle_mprotect(const struct file *file,
56264+ const unsigned long prot);
56265+int gr_check_hidden_task(const struct task_struct *tsk);
56266+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
56267+ const struct vfsmount *mnt);
56268+__u32 gr_acl_handle_utime(const struct dentry *dentry,
56269+ const struct vfsmount *mnt);
56270+__u32 gr_acl_handle_access(const struct dentry *dentry,
56271+ const struct vfsmount *mnt, const int fmode);
56272+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
56273+ const struct vfsmount *mnt, mode_t mode);
56274+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
56275+ const struct vfsmount *mnt, mode_t mode);
56276+__u32 gr_acl_handle_chown(const struct dentry *dentry,
56277+ const struct vfsmount *mnt);
56278+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
56279+ const struct vfsmount *mnt);
56280+int gr_handle_ptrace(struct task_struct *task, const long request);
56281+int gr_handle_proc_ptrace(struct task_struct *task);
56282+__u32 gr_acl_handle_execve(const struct dentry *dentry,
56283+ const struct vfsmount *mnt);
56284+int gr_check_crash_exec(const struct file *filp);
56285+int gr_acl_is_enabled(void);
56286+void gr_set_kernel_label(struct task_struct *task);
56287+void gr_set_role_label(struct task_struct *task, const uid_t uid,
56288+ const gid_t gid);
56289+int gr_set_proc_label(const struct dentry *dentry,
56290+ const struct vfsmount *mnt,
56291+ const int unsafe_share);
56292+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56293+ const struct vfsmount *mnt);
56294+__u32 gr_acl_handle_open(const struct dentry *dentry,
56295+ const struct vfsmount *mnt, const int fmode);
56296+__u32 gr_acl_handle_creat(const struct dentry *dentry,
56297+ const struct dentry *p_dentry,
56298+ const struct vfsmount *p_mnt, const int fmode,
56299+ const int imode);
56300+void gr_handle_create(const struct dentry *dentry,
56301+ const struct vfsmount *mnt);
56302+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56303+ const struct dentry *parent_dentry,
56304+ const struct vfsmount *parent_mnt,
56305+ const int mode);
56306+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56307+ const struct dentry *parent_dentry,
56308+ const struct vfsmount *parent_mnt);
56309+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56310+ const struct vfsmount *mnt);
56311+void gr_handle_delete(const ino_t ino, const dev_t dev);
56312+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56313+ const struct vfsmount *mnt);
56314+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56315+ const struct dentry *parent_dentry,
56316+ const struct vfsmount *parent_mnt,
56317+ const char *from);
56318+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56319+ const struct dentry *parent_dentry,
56320+ const struct vfsmount *parent_mnt,
56321+ const struct dentry *old_dentry,
56322+ const struct vfsmount *old_mnt, const char *to);
56323+int gr_acl_handle_rename(struct dentry *new_dentry,
56324+ struct dentry *parent_dentry,
56325+ const struct vfsmount *parent_mnt,
56326+ struct dentry *old_dentry,
56327+ struct inode *old_parent_inode,
56328+ struct vfsmount *old_mnt, const char *newname);
56329+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56330+ struct dentry *old_dentry,
56331+ struct dentry *new_dentry,
56332+ struct vfsmount *mnt, const __u8 replace);
56333+__u32 gr_check_link(const struct dentry *new_dentry,
56334+ const struct dentry *parent_dentry,
56335+ const struct vfsmount *parent_mnt,
56336+ const struct dentry *old_dentry,
56337+ const struct vfsmount *old_mnt);
56338+int gr_acl_handle_filldir(const struct file *file, const char *name,
56339+ const unsigned int namelen, const ino_t ino);
56340+
56341+__u32 gr_acl_handle_unix(const struct dentry *dentry,
56342+ const struct vfsmount *mnt);
56343+void gr_acl_handle_exit(void);
56344+void gr_acl_handle_psacct(struct task_struct *task, const long code);
56345+int gr_acl_handle_procpidmem(const struct task_struct *task);
56346+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56347+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56348+void gr_audit_ptrace(struct task_struct *task);
56349+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56350+
56351+#ifdef CONFIG_GRKERNSEC
56352+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56353+void gr_handle_vm86(void);
56354+void gr_handle_mem_readwrite(u64 from, u64 to);
56355+
56356+extern int grsec_enable_dmesg;
56357+extern int grsec_disable_privio;
56358+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56359+extern int grsec_enable_chroot_findtask;
56360+#endif
56361+#endif
56362+
56363+#endif
56364diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
56365--- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
56366+++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
56367@@ -0,0 +1,19 @@
56368+#ifndef __GRSOCK_H
56369+#define __GRSOCK_H
56370+
56371+extern void gr_attach_curr_ip(const struct sock *sk);
56372+extern int gr_handle_sock_all(const int family, const int type,
56373+ const int protocol);
56374+extern int gr_handle_sock_server(const struct sockaddr *sck);
56375+extern int gr_handle_sock_server_other(const struct sock *sck);
56376+extern int gr_handle_sock_client(const struct sockaddr *sck);
56377+extern int gr_search_connect(struct socket * sock,
56378+ struct sockaddr_in * addr);
56379+extern int gr_search_bind(struct socket * sock,
56380+ struct sockaddr_in * addr);
56381+extern int gr_search_listen(struct socket * sock);
56382+extern int gr_search_accept(struct socket * sock);
56383+extern int gr_search_socket(const int domain, const int type,
56384+ const int protocol);
56385+
56386+#endif
56387diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
56388--- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
56389+++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
56390@@ -675,7 +675,7 @@ struct hid_ll_driver {
56391 unsigned int code, int value);
56392
56393 int (*parse)(struct hid_device *hdev);
56394-};
56395+} __no_const;
56396
56397 #define PM_HINT_FULLON 1<<5
56398 #define PM_HINT_NORMAL 1<<1
56399diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
56400--- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
56401+++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
56402@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
56403 kunmap_atomic(kaddr, KM_USER0);
56404 }
56405
56406+static inline void sanitize_highpage(struct page *page)
56407+{
56408+ void *kaddr;
56409+ unsigned long flags;
56410+
56411+ local_irq_save(flags);
56412+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
56413+ clear_page(kaddr);
56414+ kunmap_atomic(kaddr, KM_CLEARPAGE);
56415+ local_irq_restore(flags);
56416+}
56417+
56418 static inline void zero_user_segments(struct page *page,
56419 unsigned start1, unsigned end1,
56420 unsigned start2, unsigned end2)
56421diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
56422--- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
56423+++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
56424@@ -346,6 +346,7 @@ struct i2c_algorithm {
56425 /* To determine what the adapter supports */
56426 u32 (*functionality) (struct i2c_adapter *);
56427 };
56428+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
56429
56430 /*
56431 * i2c_adapter is the structure used to identify a physical i2c bus along
56432diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
56433--- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
56434+++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
56435@@ -564,7 +564,7 @@ struct i2o_controller {
56436 struct i2o_device *exec; /* Executive */
56437 #if BITS_PER_LONG == 64
56438 spinlock_t context_list_lock; /* lock for context_list */
56439- atomic_t context_list_counter; /* needed for unique contexts */
56440+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56441 struct list_head context_list; /* list of context id's
56442 and pointers */
56443 #endif
56444diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
56445--- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
56446+++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
56447@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
56448
56449 /* Each module must use one module_init(). */
56450 #define module_init(initfn) \
56451- static inline initcall_t __inittest(void) \
56452+ static inline __used initcall_t __inittest(void) \
56453 { return initfn; } \
56454 int init_module(void) __attribute__((alias(#initfn)));
56455
56456 /* This is only required if you want to be unloadable. */
56457 #define module_exit(exitfn) \
56458- static inline exitcall_t __exittest(void) \
56459+ static inline __used exitcall_t __exittest(void) \
56460 { return exitfn; } \
56461 void cleanup_module(void) __attribute__((alias(#exitfn)));
56462
56463diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
56464--- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
56465+++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
56466@@ -126,6 +126,12 @@ extern struct cred init_cred;
56467 # define INIT_PERF_EVENTS(tsk)
56468 #endif
56469
56470+#ifdef CONFIG_X86
56471+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56472+#else
56473+#define INIT_TASK_THREAD_INFO
56474+#endif
56475+
56476 /*
56477 * INIT_TASK is used to set up the first task table, touch at
56478 * your own risk!. Base=0, limit=0x1fffff (=2MB)
56479@@ -164,6 +170,7 @@ extern struct cred init_cred;
56480 RCU_INIT_POINTER(.cred, &init_cred), \
56481 .comm = "swapper", \
56482 .thread = INIT_THREAD, \
56483+ INIT_TASK_THREAD_INFO \
56484 .fs = &init_fs, \
56485 .files = &init_files, \
56486 .signal = &init_signals, \
56487diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
56488--- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
56489+++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
56490@@ -296,7 +296,7 @@ struct iommu_flush {
56491 u8 fm, u64 type);
56492 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
56493 unsigned int size_order, u64 type);
56494-};
56495+} __no_const;
56496
56497 enum {
56498 SR_DMAR_FECTL_REG,
56499diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
56500--- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
56501+++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
56502@@ -422,7 +422,7 @@ enum
56503 /* map softirq index to softirq name. update 'softirq_to_name' in
56504 * kernel/softirq.c when adding a new softirq.
56505 */
56506-extern char *softirq_to_name[NR_SOFTIRQS];
56507+extern const char * const softirq_to_name[NR_SOFTIRQS];
56508
56509 /* softirq mask and active fields moved to irq_cpustat_t in
56510 * asm/hardirq.h to get better cache usage. KAO
56511@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56512
56513 struct softirq_action
56514 {
56515- void (*action)(struct softirq_action *);
56516+ void (*action)(void);
56517 };
56518
56519 asmlinkage void do_softirq(void);
56520 asmlinkage void __do_softirq(void);
56521-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56522+extern void open_softirq(int nr, void (*action)(void));
56523 extern void softirq_init(void);
56524 static inline void __raise_softirq_irqoff(unsigned int nr)
56525 {
56526diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
56527--- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
56528+++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
56529@@ -15,7 +15,8 @@
56530
56531 struct module;
56532
56533-#ifdef CONFIG_KALLSYMS
56534+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56535+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56536 /* Lookup the address for a symbol. Returns 0 if not found. */
56537 unsigned long kallsyms_lookup_name(const char *name);
56538
56539@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
56540 /* Stupid that this does nothing, but I didn't create this mess. */
56541 #define __print_symbol(fmt, addr)
56542 #endif /*CONFIG_KALLSYMS*/
56543+#else /* when included by kallsyms.c, vsnprintf.c, or
56544+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56545+extern void __print_symbol(const char *fmt, unsigned long address);
56546+extern int sprint_backtrace(char *buffer, unsigned long address);
56547+extern int sprint_symbol(char *buffer, unsigned long address);
56548+const char *kallsyms_lookup(unsigned long addr,
56549+ unsigned long *symbolsize,
56550+ unsigned long *offset,
56551+ char **modname, char *namebuf);
56552+#endif
56553
56554 /* This macro allows us to keep printk typechecking */
56555 static void __check_printsym_format(const char *fmt, ...)
56556diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
56557--- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
56558+++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
56559@@ -53,7 +53,7 @@ extern int kgdb_connected;
56560 extern int kgdb_io_module_registered;
56561
56562 extern atomic_t kgdb_setting_breakpoint;
56563-extern atomic_t kgdb_cpu_doing_single_step;
56564+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56565
56566 extern struct task_struct *kgdb_usethread;
56567 extern struct task_struct *kgdb_contthread;
56568@@ -251,7 +251,7 @@ struct kgdb_arch {
56569 void (*disable_hw_break)(struct pt_regs *regs);
56570 void (*remove_all_hw_break)(void);
56571 void (*correct_hw_break)(void);
56572-};
56573+} __do_const;
56574
56575 /**
56576 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
56577@@ -276,7 +276,7 @@ struct kgdb_io {
56578 void (*pre_exception) (void);
56579 void (*post_exception) (void);
56580 int is_console;
56581-};
56582+} __do_const;
56583
56584 extern struct kgdb_arch arch_kgdb_ops;
56585
56586diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
56587--- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
56588+++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
56589@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
56590 * usually useless though. */
56591 extern int __request_module(bool wait, const char *name, ...) \
56592 __attribute__((format(printf, 2, 3)));
56593+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56594+ __attribute__((format(printf, 3, 4)));
56595 #define request_module(mod...) __request_module(true, mod)
56596 #define request_module_nowait(mod...) __request_module(false, mod)
56597 #define try_then_request_module(x, mod...) \
56598diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
56599--- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
56600+++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
56601@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56602 void vcpu_load(struct kvm_vcpu *vcpu);
56603 void vcpu_put(struct kvm_vcpu *vcpu);
56604
56605-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56606+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56607 struct module *module);
56608 void kvm_exit(void);
56609
56610@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56611 struct kvm_guest_debug *dbg);
56612 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56613
56614-int kvm_arch_init(void *opaque);
56615+int kvm_arch_init(const void *opaque);
56616 void kvm_arch_exit(void);
56617
56618 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56619diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
56620--- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
56621+++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
56622@@ -899,7 +899,7 @@ struct ata_port_operations {
56623 * fields must be pointers.
56624 */
56625 const struct ata_port_operations *inherits;
56626-};
56627+} __do_const;
56628
56629 struct ata_port_info {
56630 unsigned long flags;
56631diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
56632--- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
56633+++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
56634@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
56635 int region);
56636 void * (*mca_transform_memory)(struct mca_device *,
56637 void *memory);
56638-};
56639+} __no_const;
56640
56641 struct mca_bus {
56642 u64 default_dma_mask;
56643diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
56644--- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
56645+++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
56646@@ -144,7 +144,7 @@ struct memory_accessor {
56647 size_t count);
56648 ssize_t (*write)(struct memory_accessor *, const char *buf,
56649 off_t offset, size_t count);
56650-};
56651+} __no_const;
56652
56653 /*
56654 * Kernel text modification mutex, used for code patching. Users of this lock
56655diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
56656--- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
56657+++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
56658@@ -234,6 +234,7 @@ struct abx500_ops {
56659 int (*event_registers_startup_state_get) (struct device *, u8 *);
56660 int (*startup_irq_enabled) (struct device *, unsigned int);
56661 };
56662+typedef struct abx500_ops __no_const abx500_ops_no_const;
56663
56664 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
56665 void abx500_remove_ops(struct device *dev);
56666diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
56667--- linux-3.0.4/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
56668+++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
56669@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
56670
56671 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56672 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56673+
56674+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56675+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56676+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56677+#else
56678 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56679+#endif
56680+
56681 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56682 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56683
56684@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
56685 int set_page_dirty_lock(struct page *page);
56686 int clear_page_dirty_for_io(struct page *page);
56687
56688-/* Is the vma a continuation of the stack vma above it? */
56689-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
56690-{
56691- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56692-}
56693-
56694-static inline int stack_guard_page_start(struct vm_area_struct *vma,
56695- unsigned long addr)
56696-{
56697- return (vma->vm_flags & VM_GROWSDOWN) &&
56698- (vma->vm_start == addr) &&
56699- !vma_growsdown(vma->vm_prev, addr);
56700-}
56701-
56702-/* Is the vma a continuation of the stack vma below it? */
56703-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
56704-{
56705- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
56706-}
56707-
56708-static inline int stack_guard_page_end(struct vm_area_struct *vma,
56709- unsigned long addr)
56710-{
56711- return (vma->vm_flags & VM_GROWSUP) &&
56712- (vma->vm_end == addr) &&
56713- !vma_growsup(vma->vm_next, addr);
56714-}
56715-
56716 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56717 unsigned long old_addr, struct vm_area_struct *new_vma,
56718 unsigned long new_addr, unsigned long len);
56719@@ -1169,6 +1148,15 @@ struct shrinker {
56720 extern void register_shrinker(struct shrinker *);
56721 extern void unregister_shrinker(struct shrinker *);
56722
56723+#ifdef CONFIG_MMU
56724+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
56725+#else
56726+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
56727+{
56728+ return __pgprot(0);
56729+}
56730+#endif
56731+
56732 int vma_wants_writenotify(struct vm_area_struct *vma);
56733
56734 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
56735@@ -1452,6 +1440,7 @@ out:
56736 }
56737
56738 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56739+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56740
56741 extern unsigned long do_brk(unsigned long, unsigned long);
56742
56743@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
56744 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56745 struct vm_area_struct **pprev);
56746
56747+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56748+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56749+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56750+
56751 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56752 NULL if none. Assume start_addr < end_addr. */
56753 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56754@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
56755 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56756 }
56757
56758-#ifdef CONFIG_MMU
56759-pgprot_t vm_get_page_prot(unsigned long vm_flags);
56760-#else
56761-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
56762-{
56763- return __pgprot(0);
56764-}
56765-#endif
56766-
56767 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56768 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56769 unsigned long pfn, unsigned long size, pgprot_t);
56770@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
56771 extern int sysctl_memory_failure_early_kill;
56772 extern int sysctl_memory_failure_recovery;
56773 extern void shake_page(struct page *p, int access);
56774-extern atomic_long_t mce_bad_pages;
56775+extern atomic_long_unchecked_t mce_bad_pages;
56776 extern int soft_offline_page(struct page *page, int flags);
56777
56778 extern void dump_page(struct page *page);
56779@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
56780 unsigned int pages_per_huge_page);
56781 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
56782
56783+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56784+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56785+#else
56786+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56787+#endif
56788+
56789 #endif /* __KERNEL__ */
56790 #endif /* _LINUX_MM_H */
56791diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
56792--- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
56793+++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
56794@@ -184,6 +184,8 @@ struct vm_area_struct {
56795 #ifdef CONFIG_NUMA
56796 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56797 #endif
56798+
56799+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56800 };
56801
56802 struct core_thread {
56803@@ -316,6 +318,24 @@ struct mm_struct {
56804 #ifdef CONFIG_CPUMASK_OFFSTACK
56805 struct cpumask cpumask_allocation;
56806 #endif
56807+
56808+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56809+ unsigned long pax_flags;
56810+#endif
56811+
56812+#ifdef CONFIG_PAX_DLRESOLVE
56813+ unsigned long call_dl_resolve;
56814+#endif
56815+
56816+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56817+ unsigned long call_syscall;
56818+#endif
56819+
56820+#ifdef CONFIG_PAX_ASLR
56821+ unsigned long delta_mmap; /* randomized offset */
56822+ unsigned long delta_stack; /* randomized offset */
56823+#endif
56824+
56825 };
56826
56827 static inline void mm_init_cpumask(struct mm_struct *mm)
56828diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
56829--- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
56830+++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
56831@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
56832 */
56833 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56834 ({ \
56835- pte_t __pte; \
56836+ pte_t ___pte; \
56837 struct vm_area_struct *___vma = __vma; \
56838 unsigned long ___address = __address; \
56839- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56840+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56841 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56842- __pte; \
56843+ ___pte; \
56844 })
56845
56846 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
56847diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
56848--- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
56849+++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
56850@@ -350,7 +350,7 @@ struct zone {
56851 unsigned long flags; /* zone flags, see below */
56852
56853 /* Zone statistics */
56854- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56855+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56856
56857 /*
56858 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
56859diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
56860--- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
56861+++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
56862@@ -12,7 +12,7 @@
56863 typedef unsigned long kernel_ulong_t;
56864 #endif
56865
56866-#define PCI_ANY_ID (~0)
56867+#define PCI_ANY_ID ((__u16)~0)
56868
56869 struct pci_device_id {
56870 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56871@@ -131,7 +131,7 @@ struct usb_device_id {
56872 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56873 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56874
56875-#define HID_ANY_ID (~0)
56876+#define HID_ANY_ID (~0U)
56877
56878 struct hid_device_id {
56879 __u16 bus;
56880diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
56881--- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
56882+++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
56883@@ -16,6 +16,7 @@
56884 #include <linux/kobject.h>
56885 #include <linux/moduleparam.h>
56886 #include <linux/tracepoint.h>
56887+#include <linux/fs.h>
56888
56889 #include <linux/percpu.h>
56890 #include <asm/module.h>
56891@@ -325,19 +326,16 @@ struct module
56892 int (*init)(void);
56893
56894 /* If this is non-NULL, vfree after init() returns */
56895- void *module_init;
56896+ void *module_init_rx, *module_init_rw;
56897
56898 /* Here is the actual code + data, vfree'd on unload. */
56899- void *module_core;
56900+ void *module_core_rx, *module_core_rw;
56901
56902 /* Here are the sizes of the init and core sections */
56903- unsigned int init_size, core_size;
56904+ unsigned int init_size_rw, core_size_rw;
56905
56906 /* The size of the executable code in each section. */
56907- unsigned int init_text_size, core_text_size;
56908-
56909- /* Size of RO sections of the module (text+rodata) */
56910- unsigned int init_ro_size, core_ro_size;
56911+ unsigned int init_size_rx, core_size_rx;
56912
56913 /* Arch-specific module values */
56914 struct mod_arch_specific arch;
56915@@ -393,6 +391,10 @@ struct module
56916 #ifdef CONFIG_EVENT_TRACING
56917 struct ftrace_event_call **trace_events;
56918 unsigned int num_trace_events;
56919+ struct file_operations trace_id;
56920+ struct file_operations trace_enable;
56921+ struct file_operations trace_format;
56922+ struct file_operations trace_filter;
56923 #endif
56924 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
56925 unsigned int num_ftrace_callsites;
56926@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
56927 bool is_module_percpu_address(unsigned long addr);
56928 bool is_module_text_address(unsigned long addr);
56929
56930+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56931+{
56932+
56933+#ifdef CONFIG_PAX_KERNEXEC
56934+ if (ktla_ktva(addr) >= (unsigned long)start &&
56935+ ktla_ktva(addr) < (unsigned long)start + size)
56936+ return 1;
56937+#endif
56938+
56939+ return ((void *)addr >= start && (void *)addr < start + size);
56940+}
56941+
56942+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56943+{
56944+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56945+}
56946+
56947+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56948+{
56949+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56950+}
56951+
56952+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56953+{
56954+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56955+}
56956+
56957+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56958+{
56959+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56960+}
56961+
56962 static inline int within_module_core(unsigned long addr, struct module *mod)
56963 {
56964- return (unsigned long)mod->module_core <= addr &&
56965- addr < (unsigned long)mod->module_core + mod->core_size;
56966+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56967 }
56968
56969 static inline int within_module_init(unsigned long addr, struct module *mod)
56970 {
56971- return (unsigned long)mod->module_init <= addr &&
56972- addr < (unsigned long)mod->module_init + mod->init_size;
56973+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56974 }
56975
56976 /* Search for module by name: must hold module_mutex. */
56977diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
56978--- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
56979+++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
56980@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56981 sections. Returns NULL on failure. */
56982 void *module_alloc(unsigned long size);
56983
56984+#ifdef CONFIG_PAX_KERNEXEC
56985+void *module_alloc_exec(unsigned long size);
56986+#else
56987+#define module_alloc_exec(x) module_alloc(x)
56988+#endif
56989+
56990 /* Free memory returned from module_alloc. */
56991 void module_free(struct module *mod, void *module_region);
56992
56993+#ifdef CONFIG_PAX_KERNEXEC
56994+void module_free_exec(struct module *mod, void *module_region);
56995+#else
56996+#define module_free_exec(x, y) module_free((x), (y))
56997+#endif
56998+
56999 /* Apply the given relocation to the (simplified) ELF. Return -error
57000 or 0. */
57001 int apply_relocate(Elf_Shdr *sechdrs,
57002diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
57003--- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
57004+++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
57005@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
57006 * @len is usually just sizeof(string).
57007 */
57008 #define module_param_string(name, string, len, perm) \
57009- static const struct kparam_string __param_string_##name \
57010+ static const struct kparam_string __param_string_##name __used \
57011 = { len, string }; \
57012 __module_param_call(MODULE_PARAM_PREFIX, name, \
57013 &param_ops_string, \
57014@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
57015 * module_param_named() for why this might be necessary.
57016 */
57017 #define module_param_array_named(name, array, type, nump, perm) \
57018- static const struct kparam_array __param_arr_##name \
57019+ static const struct kparam_array __param_arr_##name __used \
57020 = { .max = ARRAY_SIZE(array), .num = nump, \
57021 .ops = &param_ops_##type, \
57022 .elemsize = sizeof(array[0]), .elem = array }; \
57023diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
57024--- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
57025+++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
57026@@ -24,7 +24,7 @@ struct nameidata {
57027 unsigned seq;
57028 int last_type;
57029 unsigned depth;
57030- char *saved_names[MAX_NESTED_LINKS + 1];
57031+ const char *saved_names[MAX_NESTED_LINKS + 1];
57032
57033 /* Intent data */
57034 union {
57035@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
57036 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57037 extern void unlock_rename(struct dentry *, struct dentry *);
57038
57039-static inline void nd_set_link(struct nameidata *nd, char *path)
57040+static inline void nd_set_link(struct nameidata *nd, const char *path)
57041 {
57042 nd->saved_names[nd->depth] = path;
57043 }
57044
57045-static inline char *nd_get_link(struct nameidata *nd)
57046+static inline const char *nd_get_link(const struct nameidata *nd)
57047 {
57048 return nd->saved_names[nd->depth];
57049 }
57050diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
57051--- linux-3.0.4/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
57052+++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
57053@@ -979,6 +979,7 @@ struct net_device_ops {
57054 int (*ndo_set_features)(struct net_device *dev,
57055 u32 features);
57056 };
57057+typedef struct net_device_ops __no_const net_device_ops_no_const;
57058
57059 /*
57060 * The DEVICE structure.
57061diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
57062--- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57063+++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
57064@@ -0,0 +1,9 @@
57065+#ifndef _LINUX_NETFILTER_XT_GRADM_H
57066+#define _LINUX_NETFILTER_XT_GRADM_H 1
57067+
57068+struct xt_gradm_mtinfo {
57069+ __u16 flags;
57070+ __u16 invflags;
57071+};
57072+
57073+#endif
57074diff -urNp linux-3.0.4/include/linux/of_pdt.h linux-3.0.4/include/linux/of_pdt.h
57075--- linux-3.0.4/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
57076+++ linux-3.0.4/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
57077@@ -32,7 +32,7 @@ struct of_pdt_ops {
57078
57079 /* return 0 on success; fill in 'len' with number of bytes in path */
57080 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
57081-};
57082+} __no_const;
57083
57084 extern void *prom_early_alloc(unsigned long size);
57085
57086diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
57087--- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
57088+++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
57089@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
57090 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57091 char const * name, ulong * val);
57092
57093-/** Create a file for read-only access to an atomic_t. */
57094+/** Create a file for read-only access to an atomic_unchecked_t. */
57095 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57096- char const * name, atomic_t * val);
57097+ char const * name, atomic_unchecked_t * val);
57098
57099 /** create a directory */
57100 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57101diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
57102--- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
57103+++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
57104@@ -129,7 +129,7 @@ struct parallel_data {
57105 struct padata_instance *pinst;
57106 struct padata_parallel_queue __percpu *pqueue;
57107 struct padata_serial_queue __percpu *squeue;
57108- atomic_t seq_nr;
57109+ atomic_unchecked_t seq_nr;
57110 atomic_t reorder_objects;
57111 atomic_t refcnt;
57112 unsigned int max_seq_nr;
57113diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
57114--- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
57115+++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
57116@@ -761,8 +761,8 @@ struct perf_event {
57117
57118 enum perf_event_active_state state;
57119 unsigned int attach_state;
57120- local64_t count;
57121- atomic64_t child_count;
57122+ local64_t count; /* PaX: fix it one day */
57123+ atomic64_unchecked_t child_count;
57124
57125 /*
57126 * These are the total time in nanoseconds that the event
57127@@ -813,8 +813,8 @@ struct perf_event {
57128 * These accumulate total time (in nanoseconds) that children
57129 * events have been enabled and running, respectively.
57130 */
57131- atomic64_t child_total_time_enabled;
57132- atomic64_t child_total_time_running;
57133+ atomic64_unchecked_t child_total_time_enabled;
57134+ atomic64_unchecked_t child_total_time_running;
57135
57136 /*
57137 * Protect attach/detach and child_list:
57138diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
57139--- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
57140+++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
57141@@ -46,9 +46,9 @@ struct pipe_buffer {
57142 struct pipe_inode_info {
57143 wait_queue_head_t wait;
57144 unsigned int nrbufs, curbuf, buffers;
57145- unsigned int readers;
57146- unsigned int writers;
57147- unsigned int waiting_writers;
57148+ atomic_t readers;
57149+ atomic_t writers;
57150+ atomic_t waiting_writers;
57151 unsigned int r_counter;
57152 unsigned int w_counter;
57153 struct page *tmp_page;
57154diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
57155--- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
57156+++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
57157@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
57158
57159 static inline void pm_runtime_mark_last_busy(struct device *dev)
57160 {
57161- ACCESS_ONCE(dev->power.last_busy) = jiffies;
57162+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
57163 }
57164
57165 #else /* !CONFIG_PM_RUNTIME */
57166diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
57167--- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
57168+++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
57169@@ -19,8 +19,8 @@
57170 * under normal circumstances, used to verify that nobody uses
57171 * non-initialized list entries.
57172 */
57173-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57174-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57175+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57176+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57177
57178 /********** include/linux/timer.h **********/
57179 /*
57180diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
57181--- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
57182+++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
57183@@ -115,7 +115,7 @@ struct preempt_ops {
57184 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
57185 void (*sched_out)(struct preempt_notifier *notifier,
57186 struct task_struct *next);
57187-};
57188+} __no_const;
57189
57190 /**
57191 * preempt_notifier - key for installing preemption notifiers
57192diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
57193--- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
57194+++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
57195@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
57196 return proc_create_data(name, mode, parent, proc_fops, NULL);
57197 }
57198
57199+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
57200+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
57201+{
57202+#ifdef CONFIG_GRKERNSEC_PROC_USER
57203+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
57204+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57205+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
57206+#else
57207+ return proc_create_data(name, mode, parent, proc_fops, NULL);
57208+#endif
57209+}
57210+
57211+
57212 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
57213 mode_t mode, struct proc_dir_entry *base,
57214 read_proc_t *read_proc, void * data)
57215@@ -258,7 +271,7 @@ union proc_op {
57216 int (*proc_show)(struct seq_file *m,
57217 struct pid_namespace *ns, struct pid *pid,
57218 struct task_struct *task);
57219-};
57220+} __no_const;
57221
57222 struct ctl_table_header;
57223 struct ctl_table;
57224diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
57225--- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
57226+++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
57227@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
57228 extern void exit_ptrace(struct task_struct *tracer);
57229 #define PTRACE_MODE_READ 1
57230 #define PTRACE_MODE_ATTACH 2
57231-/* Returns 0 on success, -errno on denial. */
57232-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
57233 /* Returns true on success, false on denial. */
57234 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
57235+/* Returns true on success, false on denial. */
57236+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
57237
57238 static inline int ptrace_reparented(struct task_struct *child)
57239 {
57240diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
57241--- linux-3.0.4/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
57242+++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
57243@@ -69,12 +69,17 @@ void srandom32(u32 seed);
57244
57245 u32 prandom32(struct rnd_state *);
57246
57247+static inline unsigned long pax_get_random_long(void)
57248+{
57249+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
57250+}
57251+
57252 /*
57253 * Handle minimum values for seeds
57254 */
57255 static inline u32 __seed(u32 x, u32 m)
57256 {
57257- return (x < m) ? x + m : x;
57258+ return (x <= m) ? x + m + 1 : x;
57259 }
57260
57261 /**
57262diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
57263--- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
57264+++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
57265@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
57266 * Architecture-specific implementations of sys_reboot commands.
57267 */
57268
57269-extern void machine_restart(char *cmd);
57270-extern void machine_halt(void);
57271-extern void machine_power_off(void);
57272+extern void machine_restart(char *cmd) __noreturn;
57273+extern void machine_halt(void) __noreturn;
57274+extern void machine_power_off(void) __noreturn;
57275
57276 extern void machine_shutdown(void);
57277 struct pt_regs;
57278@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
57279 */
57280
57281 extern void kernel_restart_prepare(char *cmd);
57282-extern void kernel_restart(char *cmd);
57283-extern void kernel_halt(void);
57284-extern void kernel_power_off(void);
57285+extern void kernel_restart(char *cmd) __noreturn;
57286+extern void kernel_halt(void) __noreturn;
57287+extern void kernel_power_off(void) __noreturn;
57288
57289 extern int C_A_D; /* for sysctl */
57290 void ctrl_alt_del(void);
57291@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
57292 * Emergency restart, callable from an interrupt handler.
57293 */
57294
57295-extern void emergency_restart(void);
57296+extern void emergency_restart(void) __noreturn;
57297 #include <asm/emergency-restart.h>
57298
57299 #endif
57300diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
57301--- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
57302+++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
57303@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
57304 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57305
57306 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57307-#define get_generation(s) atomic_read (&fs_generation(s))
57308+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57309 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57310 #define __fs_changed(gen,s) (gen != get_generation (s))
57311 #define fs_changed(gen,s) \
57312diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
57313--- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
57314+++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
57315@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
57316 /* Comment? -Hans */
57317 wait_queue_head_t s_wait;
57318 /* To be obsoleted soon by per buffer seals.. -Hans */
57319- atomic_t s_generation_counter; // increased by one every time the
57320+ atomic_unchecked_t s_generation_counter; // increased by one every time the
57321 // tree gets re-balanced
57322 unsigned long s_properties; /* File system properties. Currently holds
57323 on-disk FS format */
57324diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
57325--- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
57326+++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
57327@@ -159,7 +159,7 @@ struct rchan_callbacks
57328 * The callback should return 0 if successful, negative if not.
57329 */
57330 int (*remove_buf_file)(struct dentry *dentry);
57331-};
57332+} __no_const;
57333
57334 /*
57335 * CONFIG_RELAY kernel API, kernel/relay.c
57336diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
57337--- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
57338+++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
57339@@ -147,6 +147,7 @@ struct rfkill_ops {
57340 void (*query)(struct rfkill *rfkill, void *data);
57341 int (*set_block)(void *data, bool blocked);
57342 };
57343+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
57344
57345 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
57346 /**
57347diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
57348--- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
57349+++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
57350@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
57351 void anon_vma_init(void); /* create anon_vma_cachep */
57352 int anon_vma_prepare(struct vm_area_struct *);
57353 void unlink_anon_vmas(struct vm_area_struct *);
57354-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
57355-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
57356+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
57357+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
57358 void __anon_vma_link(struct vm_area_struct *);
57359
57360 static inline void anon_vma_merge(struct vm_area_struct *vma,
57361diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
57362--- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
57363+++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
57364@@ -100,6 +100,7 @@ struct bio_list;
57365 struct fs_struct;
57366 struct perf_event_context;
57367 struct blk_plug;
57368+struct linux_binprm;
57369
57370 /*
57371 * List of flags we want to share for kernel threads,
57372@@ -380,10 +381,13 @@ struct user_namespace;
57373 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57374
57375 extern int sysctl_max_map_count;
57376+extern unsigned long sysctl_heap_stack_gap;
57377
57378 #include <linux/aio.h>
57379
57380 #ifdef CONFIG_MMU
57381+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57382+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57383 extern void arch_pick_mmap_layout(struct mm_struct *mm);
57384 extern unsigned long
57385 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57386@@ -629,6 +633,17 @@ struct signal_struct {
57387 #ifdef CONFIG_TASKSTATS
57388 struct taskstats *stats;
57389 #endif
57390+
57391+#ifdef CONFIG_GRKERNSEC
57392+ u32 curr_ip;
57393+ u32 saved_ip;
57394+ u32 gr_saddr;
57395+ u32 gr_daddr;
57396+ u16 gr_sport;
57397+ u16 gr_dport;
57398+ u8 used_accept:1;
57399+#endif
57400+
57401 #ifdef CONFIG_AUDIT
57402 unsigned audit_tty;
57403 struct tty_audit_buf *tty_audit_buf;
57404@@ -710,6 +725,11 @@ struct user_struct {
57405 struct key *session_keyring; /* UID's default session keyring */
57406 #endif
57407
57408+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57409+ unsigned int banned;
57410+ unsigned long ban_expires;
57411+#endif
57412+
57413 /* Hash table maintenance information */
57414 struct hlist_node uidhash_node;
57415 uid_t uid;
57416@@ -1340,8 +1360,8 @@ struct task_struct {
57417 struct list_head thread_group;
57418
57419 struct completion *vfork_done; /* for vfork() */
57420- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57421- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57422+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57423+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57424
57425 cputime_t utime, stime, utimescaled, stimescaled;
57426 cputime_t gtime;
57427@@ -1357,13 +1377,6 @@ struct task_struct {
57428 struct task_cputime cputime_expires;
57429 struct list_head cpu_timers[3];
57430
57431-/* process credentials */
57432- const struct cred __rcu *real_cred; /* objective and real subjective task
57433- * credentials (COW) */
57434- const struct cred __rcu *cred; /* effective (overridable) subjective task
57435- * credentials (COW) */
57436- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57437-
57438 char comm[TASK_COMM_LEN]; /* executable name excluding path
57439 - access with [gs]et_task_comm (which lock
57440 it with task_lock())
57441@@ -1380,8 +1393,16 @@ struct task_struct {
57442 #endif
57443 /* CPU-specific state of this task */
57444 struct thread_struct thread;
57445+/* thread_info moved to task_struct */
57446+#ifdef CONFIG_X86
57447+ struct thread_info tinfo;
57448+#endif
57449 /* filesystem information */
57450 struct fs_struct *fs;
57451+
57452+ const struct cred __rcu *cred; /* effective (overridable) subjective task
57453+ * credentials (COW) */
57454+
57455 /* open file information */
57456 struct files_struct *files;
57457 /* namespaces */
57458@@ -1428,6 +1449,11 @@ struct task_struct {
57459 struct rt_mutex_waiter *pi_blocked_on;
57460 #endif
57461
57462+/* process credentials */
57463+ const struct cred __rcu *real_cred; /* objective and real subjective task
57464+ * credentials (COW) */
57465+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57466+
57467 #ifdef CONFIG_DEBUG_MUTEXES
57468 /* mutex deadlock detection */
57469 struct mutex_waiter *blocked_on;
57470@@ -1538,6 +1564,21 @@ struct task_struct {
57471 unsigned long default_timer_slack_ns;
57472
57473 struct list_head *scm_work_list;
57474+
57475+#ifdef CONFIG_GRKERNSEC
57476+ /* grsecurity */
57477+ struct dentry *gr_chroot_dentry;
57478+ struct acl_subject_label *acl;
57479+ struct acl_role_label *role;
57480+ struct file *exec_file;
57481+ u16 acl_role_id;
57482+ /* is this the task that authenticated to the special role */
57483+ u8 acl_sp_role;
57484+ u8 is_writable;
57485+ u8 brute;
57486+ u8 gr_is_chrooted;
57487+#endif
57488+
57489 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57490 /* Index of current stored address in ret_stack */
57491 int curr_ret_stack;
57492@@ -1572,6 +1613,57 @@ struct task_struct {
57493 #endif
57494 };
57495
57496+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57497+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57498+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57499+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57500+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57501+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57502+
57503+#ifdef CONFIG_PAX_SOFTMODE
57504+extern int pax_softmode;
57505+#endif
57506+
57507+extern int pax_check_flags(unsigned long *);
57508+
57509+/* if tsk != current then task_lock must be held on it */
57510+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57511+static inline unsigned long pax_get_flags(struct task_struct *tsk)
57512+{
57513+ if (likely(tsk->mm))
57514+ return tsk->mm->pax_flags;
57515+ else
57516+ return 0UL;
57517+}
57518+
57519+/* if tsk != current then task_lock must be held on it */
57520+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57521+{
57522+ if (likely(tsk->mm)) {
57523+ tsk->mm->pax_flags = flags;
57524+ return 0;
57525+ }
57526+ return -EINVAL;
57527+}
57528+#endif
57529+
57530+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57531+extern void pax_set_initial_flags(struct linux_binprm *bprm);
57532+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57533+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57534+#endif
57535+
57536+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57537+extern void pax_report_insns(void *pc, void *sp);
57538+extern void pax_report_refcount_overflow(struct pt_regs *regs);
57539+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
57540+
57541+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57542+extern void pax_track_stack(void);
57543+#else
57544+static inline void pax_track_stack(void) {}
57545+#endif
57546+
57547 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57548 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
57549
57550@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
57551 #define PF_DUMPCORE 0x00000200 /* dumped core */
57552 #define PF_SIGNALED 0x00000400 /* killed by a signal */
57553 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
57554+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
57555 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
57556 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
57557 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
57558@@ -2056,7 +2149,9 @@ void yield(void);
57559 extern struct exec_domain default_exec_domain;
57560
57561 union thread_union {
57562+#ifndef CONFIG_X86
57563 struct thread_info thread_info;
57564+#endif
57565 unsigned long stack[THREAD_SIZE/sizeof(long)];
57566 };
57567
57568@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
57569 */
57570
57571 extern struct task_struct *find_task_by_vpid(pid_t nr);
57572+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
57573 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
57574 struct pid_namespace *ns);
57575
57576@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
57577 extern void exit_itimers(struct signal_struct *);
57578 extern void flush_itimer_signals(void);
57579
57580-extern NORET_TYPE void do_group_exit(int);
57581+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57582
57583 extern void daemonize(const char *, ...);
57584 extern int allow_signal(int);
57585@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
57586
57587 #endif
57588
57589-static inline int object_is_on_stack(void *obj)
57590+static inline int object_starts_on_stack(void *obj)
57591 {
57592- void *stack = task_stack_page(current);
57593+ const void *stack = task_stack_page(current);
57594
57595 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57596 }
57597
57598+#ifdef CONFIG_PAX_USERCOPY
57599+extern int object_is_on_stack(const void *obj, unsigned long len);
57600+#endif
57601+
57602 extern void thread_info_cache_init(void);
57603
57604 #ifdef CONFIG_DEBUG_STACK_USAGE
57605diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
57606--- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
57607+++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
57608@@ -43,7 +43,8 @@ struct screen_info {
57609 __u16 pages; /* 0x32 */
57610 __u16 vesa_attributes; /* 0x34 */
57611 __u32 capabilities; /* 0x36 */
57612- __u8 _reserved[6]; /* 0x3a */
57613+ __u16 vesapm_size; /* 0x3a */
57614+ __u8 _reserved[4]; /* 0x3c */
57615 } __attribute__((packed));
57616
57617 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57618diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
57619--- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
57620+++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
57621@@ -36,6 +36,7 @@
57622 #include <linux/key.h>
57623 #include <linux/xfrm.h>
57624 #include <linux/slab.h>
57625+#include <linux/grsecurity.h>
57626 #include <net/flow.h>
57627
57628 /* Maximum number of letters for an LSM name string */
57629diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
57630--- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
57631+++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
57632@@ -32,6 +32,7 @@ struct seq_operations {
57633 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
57634 int (*show) (struct seq_file *m, void *v);
57635 };
57636+typedef struct seq_operations __no_const seq_operations_no_const;
57637
57638 #define SEQ_SKIP 1
57639
57640diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
57641--- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
57642+++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
57643@@ -10,7 +10,7 @@
57644
57645 #define SHMEM_NR_DIRECT 16
57646
57647-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
57648+#define SHMEM_SYMLINK_INLINE_LEN 64
57649
57650 struct shmem_inode_info {
57651 spinlock_t lock;
57652diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
57653--- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
57654+++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
57655@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57656 pid_t shm_cprid;
57657 pid_t shm_lprid;
57658 struct user_struct *mlock_user;
57659+#ifdef CONFIG_GRKERNSEC
57660+ time_t shm_createtime;
57661+ pid_t shm_lapid;
57662+#endif
57663 };
57664
57665 /* shm_mode upper byte flags */
57666diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
57667--- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
57668+++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
57669@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
57670 */
57671 static inline int skb_queue_empty(const struct sk_buff_head *list)
57672 {
57673- return list->next == (struct sk_buff *)list;
57674+ return list->next == (const struct sk_buff *)list;
57675 }
57676
57677 /**
57678@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
57679 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57680 const struct sk_buff *skb)
57681 {
57682- return skb->next == (struct sk_buff *)list;
57683+ return skb->next == (const struct sk_buff *)list;
57684 }
57685
57686 /**
57687@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
57688 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57689 const struct sk_buff *skb)
57690 {
57691- return skb->prev == (struct sk_buff *)list;
57692+ return skb->prev == (const struct sk_buff *)list;
57693 }
57694
57695 /**
57696@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
57697 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
57698 */
57699 #ifndef NET_SKB_PAD
57700-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
57701+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
57702 #endif
57703
57704 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57705diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
57706--- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
57707+++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
57708@@ -96,10 +96,10 @@ struct kmem_cache {
57709 unsigned long node_allocs;
57710 unsigned long node_frees;
57711 unsigned long node_overflow;
57712- atomic_t allochit;
57713- atomic_t allocmiss;
57714- atomic_t freehit;
57715- atomic_t freemiss;
57716+ atomic_unchecked_t allochit;
57717+ atomic_unchecked_t allocmiss;
57718+ atomic_unchecked_t freehit;
57719+ atomic_unchecked_t freemiss;
57720
57721 /*
57722 * If debugging is enabled, then the allocator can add additional
57723diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
57724--- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
57725+++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
57726@@ -11,12 +11,20 @@
57727
57728 #include <linux/gfp.h>
57729 #include <linux/types.h>
57730+#include <linux/err.h>
57731
57732 /*
57733 * Flags to pass to kmem_cache_create().
57734 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57735 */
57736 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57737+
57738+#ifdef CONFIG_PAX_USERCOPY
57739+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57740+#else
57741+#define SLAB_USERCOPY 0x00000000UL
57742+#endif
57743+
57744 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57745 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57746 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57747@@ -87,10 +95,13 @@
57748 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57749 * Both make kfree a no-op.
57750 */
57751-#define ZERO_SIZE_PTR ((void *)16)
57752+#define ZERO_SIZE_PTR \
57753+({ \
57754+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57755+ (void *)(-MAX_ERRNO-1L); \
57756+})
57757
57758-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57759- (unsigned long)ZERO_SIZE_PTR)
57760+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57761
57762 /*
57763 * struct kmem_cache related prototypes
57764@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
57765 void kfree(const void *);
57766 void kzfree(const void *);
57767 size_t ksize(const void *);
57768+void check_object_size(const void *ptr, unsigned long n, bool to);
57769
57770 /*
57771 * Allocator specific definitions. These are mainly used to establish optimized
57772@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
57773
57774 void __init kmem_cache_init_late(void);
57775
57776+#define kmalloc(x, y) \
57777+({ \
57778+ void *___retval; \
57779+ intoverflow_t ___x = (intoverflow_t)x; \
57780+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
57781+ ___retval = NULL; \
57782+ else \
57783+ ___retval = kmalloc((size_t)___x, (y)); \
57784+ ___retval; \
57785+})
57786+
57787+#define kmalloc_node(x, y, z) \
57788+({ \
57789+ void *___retval; \
57790+ intoverflow_t ___x = (intoverflow_t)x; \
57791+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57792+ ___retval = NULL; \
57793+ else \
57794+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
57795+ ___retval; \
57796+})
57797+
57798+#define kzalloc(x, y) \
57799+({ \
57800+ void *___retval; \
57801+ intoverflow_t ___x = (intoverflow_t)x; \
57802+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
57803+ ___retval = NULL; \
57804+ else \
57805+ ___retval = kzalloc((size_t)___x, (y)); \
57806+ ___retval; \
57807+})
57808+
57809+#define __krealloc(x, y, z) \
57810+({ \
57811+ void *___retval; \
57812+ intoverflow_t ___y = (intoverflow_t)y; \
57813+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
57814+ ___retval = NULL; \
57815+ else \
57816+ ___retval = __krealloc((x), (size_t)___y, (z)); \
57817+ ___retval; \
57818+})
57819+
57820+#define krealloc(x, y, z) \
57821+({ \
57822+ void *___retval; \
57823+ intoverflow_t ___y = (intoverflow_t)y; \
57824+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
57825+ ___retval = NULL; \
57826+ else \
57827+ ___retval = krealloc((x), (size_t)___y, (z)); \
57828+ ___retval; \
57829+})
57830+
57831 #endif /* _LINUX_SLAB_H */
57832diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
57833--- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
57834+++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
57835@@ -82,7 +82,7 @@ struct kmem_cache {
57836 struct kmem_cache_order_objects max;
57837 struct kmem_cache_order_objects min;
57838 gfp_t allocflags; /* gfp flags to use on each alloc */
57839- int refcount; /* Refcount for slab cache destroy */
57840+ atomic_t refcount; /* Refcount for slab cache destroy */
57841 void (*ctor)(void *);
57842 int inuse; /* Offset to metadata */
57843 int align; /* Alignment */
57844@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
57845 }
57846
57847 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57848-void *__kmalloc(size_t size, gfp_t flags);
57849+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
57850
57851 static __always_inline void *
57852 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
57853diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
57854--- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
57855+++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
57856@@ -61,7 +61,7 @@ struct sonet_stats {
57857 #include <asm/atomic.h>
57858
57859 struct k_sonet_stats {
57860-#define __HANDLE_ITEM(i) atomic_t i
57861+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57862 __SONET_ITEMS
57863 #undef __HANDLE_ITEM
57864 };
57865diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
57866--- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
57867+++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
57868@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
57869 {
57870 switch (sap->sa_family) {
57871 case AF_INET:
57872- return ntohs(((struct sockaddr_in *)sap)->sin_port);
57873+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57874 case AF_INET6:
57875- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57876+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57877 }
57878 return 0;
57879 }
57880@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
57881 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57882 const struct sockaddr *src)
57883 {
57884- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57885+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57886 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57887
57888 dsin->sin_family = ssin->sin_family;
57889@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
57890 if (sa->sa_family != AF_INET6)
57891 return 0;
57892
57893- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57894+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57895 }
57896
57897 #endif /* __KERNEL__ */
57898diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
57899--- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
57900+++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
57901@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57902 extern unsigned int svcrdma_max_requests;
57903 extern unsigned int svcrdma_max_req_size;
57904
57905-extern atomic_t rdma_stat_recv;
57906-extern atomic_t rdma_stat_read;
57907-extern atomic_t rdma_stat_write;
57908-extern atomic_t rdma_stat_sq_starve;
57909-extern atomic_t rdma_stat_rq_starve;
57910-extern atomic_t rdma_stat_rq_poll;
57911-extern atomic_t rdma_stat_rq_prod;
57912-extern atomic_t rdma_stat_sq_poll;
57913-extern atomic_t rdma_stat_sq_prod;
57914+extern atomic_unchecked_t rdma_stat_recv;
57915+extern atomic_unchecked_t rdma_stat_read;
57916+extern atomic_unchecked_t rdma_stat_write;
57917+extern atomic_unchecked_t rdma_stat_sq_starve;
57918+extern atomic_unchecked_t rdma_stat_rq_starve;
57919+extern atomic_unchecked_t rdma_stat_rq_poll;
57920+extern atomic_unchecked_t rdma_stat_rq_prod;
57921+extern atomic_unchecked_t rdma_stat_sq_poll;
57922+extern atomic_unchecked_t rdma_stat_sq_prod;
57923
57924 #define RPCRDMA_VERSION 1
57925
57926diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
57927--- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
57928+++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
57929@@ -155,7 +155,11 @@ enum
57930 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57931 };
57932
57933-
57934+#ifdef CONFIG_PAX_SOFTMODE
57935+enum {
57936+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57937+};
57938+#endif
57939
57940 /* CTL_VM names: */
57941 enum
57942@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
57943
57944 extern int proc_dostring(struct ctl_table *, int,
57945 void __user *, size_t *, loff_t *);
57946+extern int proc_dostring_modpriv(struct ctl_table *, int,
57947+ void __user *, size_t *, loff_t *);
57948 extern int proc_dointvec(struct ctl_table *, int,
57949 void __user *, size_t *, loff_t *);
57950 extern int proc_dointvec_minmax(struct ctl_table *, int,
57951diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
57952--- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
57953+++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
57954@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
57955
57956 struct module *owner;
57957
57958- int refcount;
57959+ atomic_t refcount;
57960 };
57961
57962 struct tty_ldisc {
57963diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
57964--- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
57965+++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
57966@@ -213,10 +213,26 @@ typedef struct {
57967 int counter;
57968 } atomic_t;
57969
57970+#ifdef CONFIG_PAX_REFCOUNT
57971+typedef struct {
57972+ int counter;
57973+} atomic_unchecked_t;
57974+#else
57975+typedef atomic_t atomic_unchecked_t;
57976+#endif
57977+
57978 #ifdef CONFIG_64BIT
57979 typedef struct {
57980 long counter;
57981 } atomic64_t;
57982+
57983+#ifdef CONFIG_PAX_REFCOUNT
57984+typedef struct {
57985+ long counter;
57986+} atomic64_unchecked_t;
57987+#else
57988+typedef atomic64_t atomic64_unchecked_t;
57989+#endif
57990 #endif
57991
57992 struct list_head {
57993diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
57994--- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
57995+++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
57996@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57997 long ret; \
57998 mm_segment_t old_fs = get_fs(); \
57999 \
58000- set_fs(KERNEL_DS); \
58001 pagefault_disable(); \
58002+ set_fs(KERNEL_DS); \
58003 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58004- pagefault_enable(); \
58005 set_fs(old_fs); \
58006+ pagefault_enable(); \
58007 ret; \
58008 })
58009
58010diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
58011--- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
58012+++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
58013@@ -6,32 +6,32 @@
58014
58015 static inline u16 get_unaligned_le16(const void *p)
58016 {
58017- return le16_to_cpup((__le16 *)p);
58018+ return le16_to_cpup((const __le16 *)p);
58019 }
58020
58021 static inline u32 get_unaligned_le32(const void *p)
58022 {
58023- return le32_to_cpup((__le32 *)p);
58024+ return le32_to_cpup((const __le32 *)p);
58025 }
58026
58027 static inline u64 get_unaligned_le64(const void *p)
58028 {
58029- return le64_to_cpup((__le64 *)p);
58030+ return le64_to_cpup((const __le64 *)p);
58031 }
58032
58033 static inline u16 get_unaligned_be16(const void *p)
58034 {
58035- return be16_to_cpup((__be16 *)p);
58036+ return be16_to_cpup((const __be16 *)p);
58037 }
58038
58039 static inline u32 get_unaligned_be32(const void *p)
58040 {
58041- return be32_to_cpup((__be32 *)p);
58042+ return be32_to_cpup((const __be32 *)p);
58043 }
58044
58045 static inline u64 get_unaligned_be64(const void *p)
58046 {
58047- return be64_to_cpup((__be64 *)p);
58048+ return be64_to_cpup((const __be64 *)p);
58049 }
58050
58051 static inline void put_unaligned_le16(u16 val, void *p)
58052diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
58053--- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
58054+++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
58055@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58056 #define VM_MAP 0x00000004 /* vmap()ed pages */
58057 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58058 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58059+
58060+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58061+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58062+#endif
58063+
58064 /* bits [20..32] reserved for arch specific ioremap internals */
58065
58066 /*
58067@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
58068 # endif
58069 #endif
58070
58071+#define vmalloc(x) \
58072+({ \
58073+ void *___retval; \
58074+ intoverflow_t ___x = (intoverflow_t)x; \
58075+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58076+ ___retval = NULL; \
58077+ else \
58078+ ___retval = vmalloc((unsigned long)___x); \
58079+ ___retval; \
58080+})
58081+
58082+#define vzalloc(x) \
58083+({ \
58084+ void *___retval; \
58085+ intoverflow_t ___x = (intoverflow_t)x; \
58086+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
58087+ ___retval = NULL; \
58088+ else \
58089+ ___retval = vzalloc((unsigned long)___x); \
58090+ ___retval; \
58091+})
58092+
58093+#define __vmalloc(x, y, z) \
58094+({ \
58095+ void *___retval; \
58096+ intoverflow_t ___x = (intoverflow_t)x; \
58097+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58098+ ___retval = NULL; \
58099+ else \
58100+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58101+ ___retval; \
58102+})
58103+
58104+#define vmalloc_user(x) \
58105+({ \
58106+ void *___retval; \
58107+ intoverflow_t ___x = (intoverflow_t)x; \
58108+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58109+ ___retval = NULL; \
58110+ else \
58111+ ___retval = vmalloc_user((unsigned long)___x); \
58112+ ___retval; \
58113+})
58114+
58115+#define vmalloc_exec(x) \
58116+({ \
58117+ void *___retval; \
58118+ intoverflow_t ___x = (intoverflow_t)x; \
58119+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58120+ ___retval = NULL; \
58121+ else \
58122+ ___retval = vmalloc_exec((unsigned long)___x); \
58123+ ___retval; \
58124+})
58125+
58126+#define vmalloc_node(x, y) \
58127+({ \
58128+ void *___retval; \
58129+ intoverflow_t ___x = (intoverflow_t)x; \
58130+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58131+ ___retval = NULL; \
58132+ else \
58133+ ___retval = vmalloc_node((unsigned long)___x, (y));\
58134+ ___retval; \
58135+})
58136+
58137+#define vzalloc_node(x, y) \
58138+({ \
58139+ void *___retval; \
58140+ intoverflow_t ___x = (intoverflow_t)x; \
58141+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
58142+ ___retval = NULL; \
58143+ else \
58144+ ___retval = vzalloc_node((unsigned long)___x, (y));\
58145+ ___retval; \
58146+})
58147+
58148+#define vmalloc_32(x) \
58149+({ \
58150+ void *___retval; \
58151+ intoverflow_t ___x = (intoverflow_t)x; \
58152+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58153+ ___retval = NULL; \
58154+ else \
58155+ ___retval = vmalloc_32((unsigned long)___x); \
58156+ ___retval; \
58157+})
58158+
58159+#define vmalloc_32_user(x) \
58160+({ \
58161+void *___retval; \
58162+ intoverflow_t ___x = (intoverflow_t)x; \
58163+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58164+ ___retval = NULL; \
58165+ else \
58166+ ___retval = vmalloc_32_user((unsigned long)___x);\
58167+ ___retval; \
58168+})
58169+
58170 #endif /* _LINUX_VMALLOC_H */
58171diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
58172--- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
58173+++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
58174@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
58175 /*
58176 * Zone based page accounting with per cpu differentials.
58177 */
58178-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58179+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58180
58181 static inline void zone_page_state_add(long x, struct zone *zone,
58182 enum zone_stat_item item)
58183 {
58184- atomic_long_add(x, &zone->vm_stat[item]);
58185- atomic_long_add(x, &vm_stat[item]);
58186+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
58187+ atomic_long_add_unchecked(x, &vm_stat[item]);
58188 }
58189
58190 static inline unsigned long global_page_state(enum zone_stat_item item)
58191 {
58192- long x = atomic_long_read(&vm_stat[item]);
58193+ long x = atomic_long_read_unchecked(&vm_stat[item]);
58194 #ifdef CONFIG_SMP
58195 if (x < 0)
58196 x = 0;
58197@@ -109,7 +109,7 @@ static inline unsigned long global_page_
58198 static inline unsigned long zone_page_state(struct zone *zone,
58199 enum zone_stat_item item)
58200 {
58201- long x = atomic_long_read(&zone->vm_stat[item]);
58202+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58203 #ifdef CONFIG_SMP
58204 if (x < 0)
58205 x = 0;
58206@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
58207 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
58208 enum zone_stat_item item)
58209 {
58210- long x = atomic_long_read(&zone->vm_stat[item]);
58211+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58212
58213 #ifdef CONFIG_SMP
58214 int cpu;
58215@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
58216
58217 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
58218 {
58219- atomic_long_inc(&zone->vm_stat[item]);
58220- atomic_long_inc(&vm_stat[item]);
58221+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
58222+ atomic_long_inc_unchecked(&vm_stat[item]);
58223 }
58224
58225 static inline void __inc_zone_page_state(struct page *page,
58226@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
58227
58228 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
58229 {
58230- atomic_long_dec(&zone->vm_stat[item]);
58231- atomic_long_dec(&vm_stat[item]);
58232+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
58233+ atomic_long_dec_unchecked(&vm_stat[item]);
58234 }
58235
58236 static inline void __dec_zone_page_state(struct page *page,
58237diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
58238--- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
58239+++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
58240@@ -163,7 +163,7 @@ struct saa7146_ext_vv
58241 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
58242
58243 /* the extension can override this */
58244- struct v4l2_ioctl_ops ops;
58245+ v4l2_ioctl_ops_no_const ops;
58246 /* pointer to the saa7146 core ops */
58247 const struct v4l2_ioctl_ops *core_ops;
58248
58249diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
58250--- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
58251+++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
58252@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
58253 long (*vidioc_default) (struct file *file, void *fh,
58254 bool valid_prio, int cmd, void *arg);
58255 };
58256+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
58257
58258
58259 /* v4l debugging and diagnostics */
58260diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
58261--- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
58262+++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
58263@@ -52,7 +52,7 @@ struct cfctrl_rsp {
58264 void (*radioset_rsp)(void);
58265 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
58266 struct cflayer *client_layer);
58267-};
58268+} __no_const;
58269
58270 /* Link Setup Parameters for CAIF-Links. */
58271 struct cfctrl_link_param {
58272@@ -101,8 +101,8 @@ struct cfctrl_request_info {
58273 struct cfctrl {
58274 struct cfsrvl serv;
58275 struct cfctrl_rsp res;
58276- atomic_t req_seq_no;
58277- atomic_t rsp_seq_no;
58278+ atomic_unchecked_t req_seq_no;
58279+ atomic_unchecked_t rsp_seq_no;
58280 struct list_head list;
58281 /* Protects from simultaneous access to first_req list */
58282 spinlock_t info_list_lock;
58283diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
58284--- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
58285+++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
58286@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
58287 u8 dir, flow_resolve_t resolver, void *ctx);
58288
58289 extern void flow_cache_flush(void);
58290-extern atomic_t flow_cache_genid;
58291+extern atomic_unchecked_t flow_cache_genid;
58292
58293 #endif
58294diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
58295--- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
58296+++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
58297@@ -43,8 +43,8 @@ struct inet_peer {
58298 */
58299 union {
58300 struct {
58301- atomic_t rid; /* Frag reception counter */
58302- atomic_t ip_id_count; /* IP ID for the next packet */
58303+ atomic_unchecked_t rid; /* Frag reception counter */
58304+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
58305 __u32 tcp_ts;
58306 __u32 tcp_ts_stamp;
58307 u32 metrics[RTAX_MAX];
58308@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
58309 {
58310 more++;
58311 inet_peer_refcheck(p);
58312- return atomic_add_return(more, &p->ip_id_count) - more;
58313+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
58314 }
58315
58316 #endif /* _NET_INETPEER_H */
58317diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
58318--- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
58319+++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
58320@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
58321
58322 #define FIB_RES_SADDR(net, res) \
58323 ((FIB_RES_NH(res).nh_saddr_genid == \
58324- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
58325+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
58326 FIB_RES_NH(res).nh_saddr : \
58327 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
58328 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
58329diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
58330--- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
58331+++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
58332@@ -509,7 +509,7 @@ struct ip_vs_conn {
58333 struct ip_vs_conn *control; /* Master control connection */
58334 atomic_t n_control; /* Number of controlled ones */
58335 struct ip_vs_dest *dest; /* real server */
58336- atomic_t in_pkts; /* incoming packet counter */
58337+ atomic_unchecked_t in_pkts; /* incoming packet counter */
58338
58339 /* packet transmitter for different forwarding methods. If it
58340 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58341@@ -647,7 +647,7 @@ struct ip_vs_dest {
58342 __be16 port; /* port number of the server */
58343 union nf_inet_addr addr; /* IP address of the server */
58344 volatile unsigned flags; /* dest status flags */
58345- atomic_t conn_flags; /* flags to copy to conn */
58346+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
58347 atomic_t weight; /* server weight */
58348
58349 atomic_t refcnt; /* reference counter */
58350diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
58351--- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
58352+++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
58353@@ -51,7 +51,7 @@ typedef struct {
58354 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
58355 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
58356 struct ircomm_info *);
58357-} call_t;
58358+} __no_const call_t;
58359
58360 struct ircomm_cb {
58361 irda_queue_t queue;
58362diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
58363--- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
58364+++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
58365@@ -35,6 +35,7 @@
58366 #include <linux/termios.h>
58367 #include <linux/timer.h>
58368 #include <linux/tty.h> /* struct tty_struct */
58369+#include <asm/local.h>
58370
58371 #include <net/irda/irias_object.h>
58372 #include <net/irda/ircomm_core.h>
58373@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58374 unsigned short close_delay;
58375 unsigned short closing_wait; /* time to wait before closing */
58376
58377- int open_count;
58378- int blocked_open; /* # of blocked opens */
58379+ local_t open_count;
58380+ local_t blocked_open; /* # of blocked opens */
58381
58382 /* Protect concurent access to :
58383 * o self->open_count
58384diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
58385--- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
58386+++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
58387@@ -87,7 +87,7 @@ struct iucv_sock {
58388 struct iucv_sock_list {
58389 struct hlist_head head;
58390 rwlock_t lock;
58391- atomic_t autobind_name;
58392+ atomic_unchecked_t autobind_name;
58393 };
58394
58395 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58396diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
58397--- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
58398+++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
58399@@ -95,7 +95,7 @@ struct lapb_cb {
58400 struct sk_buff_head write_queue;
58401 struct sk_buff_head ack_queue;
58402 unsigned char window;
58403- struct lapb_register_struct callbacks;
58404+ struct lapb_register_struct *callbacks;
58405
58406 /* FRMR control information */
58407 struct lapb_frame frmr_data;
58408diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
58409--- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
58410+++ linux-3.0.4/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
58411@@ -124,7 +124,7 @@ struct neigh_ops {
58412 int (*connected_output)(struct sk_buff*);
58413 int (*hh_output)(struct sk_buff*);
58414 int (*queue_xmit)(struct sk_buff*);
58415-};
58416+} __do_const;
58417
58418 struct pneigh_entry {
58419 struct pneigh_entry *next;
58420diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
58421--- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
58422+++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
58423@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
58424 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58425 {
58426 if (mark)
58427- skb_trim(skb, (unsigned char *) mark - skb->data);
58428+ skb_trim(skb, (const unsigned char *) mark - skb->data);
58429 }
58430
58431 /**
58432diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
58433--- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
58434+++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
58435@@ -56,8 +56,8 @@ struct netns_ipv4 {
58436
58437 unsigned int sysctl_ping_group_range[2];
58438
58439- atomic_t rt_genid;
58440- atomic_t dev_addr_genid;
58441+ atomic_unchecked_t rt_genid;
58442+ atomic_unchecked_t dev_addr_genid;
58443
58444 #ifdef CONFIG_IP_MROUTE
58445 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
58446diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
58447--- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
58448+++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
58449@@ -315,9 +315,9 @@ do { \
58450
58451 #else /* SCTP_DEBUG */
58452
58453-#define SCTP_DEBUG_PRINTK(whatever...)
58454-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
58455-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58456+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58457+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
58458+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58459 #define SCTP_ENABLE_DEBUG
58460 #define SCTP_DISABLE_DEBUG
58461 #define SCTP_ASSERT(expr, str, func)
58462diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
58463--- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
58464+++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
58465@@ -277,7 +277,7 @@ struct sock {
58466 #ifdef CONFIG_RPS
58467 __u32 sk_rxhash;
58468 #endif
58469- atomic_t sk_drops;
58470+ atomic_unchecked_t sk_drops;
58471 int sk_rcvbuf;
58472
58473 struct sk_filter __rcu *sk_filter;
58474@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
58475 }
58476
58477 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
58478- char __user *from, char *to,
58479+ char __user *from, unsigned char *to,
58480 int copy, int offset)
58481 {
58482 if (skb->ip_summed == CHECKSUM_NONE) {
58483diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
58484--- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
58485+++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
58486@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
58487 struct tcp_seq_afinfo {
58488 char *name;
58489 sa_family_t family;
58490- struct file_operations seq_fops;
58491- struct seq_operations seq_ops;
58492+ file_operations_no_const seq_fops;
58493+ seq_operations_no_const seq_ops;
58494 };
58495
58496 struct tcp_iter_state {
58497diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
58498--- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
58499+++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
58500@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
58501 char *name;
58502 sa_family_t family;
58503 struct udp_table *udp_table;
58504- struct file_operations seq_fops;
58505- struct seq_operations seq_ops;
58506+ file_operations_no_const seq_fops;
58507+ seq_operations_no_const seq_ops;
58508 };
58509
58510 struct udp_iter_state {
58511diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
58512--- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
58513+++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
58514@@ -505,7 +505,7 @@ struct xfrm_policy {
58515 struct timer_list timer;
58516
58517 struct flow_cache_object flo;
58518- atomic_t genid;
58519+ atomic_unchecked_t genid;
58520 u32 priority;
58521 u32 index;
58522 struct xfrm_mark mark;
58523diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
58524--- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
58525+++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
58526@@ -120,7 +120,7 @@ struct iw_cm_verbs {
58527 int backlog);
58528
58529 int (*destroy_listen)(struct iw_cm_id *cm_id);
58530-};
58531+} __no_const;
58532
58533 /**
58534 * iw_create_cm_id - Create an IW CM identifier.
58535diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
58536--- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
58537+++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
58538@@ -750,6 +750,7 @@ struct libfc_function_template {
58539 */
58540 void (*disc_stop_final) (struct fc_lport *);
58541 };
58542+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
58543
58544 /**
58545 * struct fc_disc - Discovery context
58546@@ -853,7 +854,7 @@ struct fc_lport {
58547 struct fc_vport *vport;
58548
58549 /* Operational Information */
58550- struct libfc_function_template tt;
58551+ libfc_function_template_no_const tt;
58552 u8 link_up;
58553 u8 qfull;
58554 enum fc_lport_state state;
58555diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
58556--- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
58557+++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
58558@@ -161,9 +161,9 @@ struct scsi_device {
58559 unsigned int max_device_blocked; /* what device_blocked counts down from */
58560 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58561
58562- atomic_t iorequest_cnt;
58563- atomic_t iodone_cnt;
58564- atomic_t ioerr_cnt;
58565+ atomic_unchecked_t iorequest_cnt;
58566+ atomic_unchecked_t iodone_cnt;
58567+ atomic_unchecked_t ioerr_cnt;
58568
58569 struct device sdev_gendev,
58570 sdev_dev;
58571diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
58572--- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
58573+++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
58574@@ -711,7 +711,7 @@ struct fc_function_template {
58575 unsigned long show_host_system_hostname:1;
58576
58577 unsigned long disable_target_scan:1;
58578-};
58579+} __do_const;
58580
58581
58582 /**
58583diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
58584--- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
58585+++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
58586@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
58587 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
58588 unsigned char val);
58589 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
58590-};
58591+} __no_const;
58592
58593 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
58594
58595diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
58596--- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
58597+++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
58598@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
58599 struct snd_hwdep_dsp_status *status);
58600 int (*dsp_load)(struct snd_hwdep *hw,
58601 struct snd_hwdep_dsp_image *image);
58602-};
58603+} __no_const;
58604
58605 struct snd_hwdep {
58606 struct snd_card *card;
58607diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
58608--- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
58609+++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
58610@@ -44,7 +44,7 @@ struct snd_info_entry_text {
58611 struct snd_info_buffer *buffer);
58612 void (*write)(struct snd_info_entry *entry,
58613 struct snd_info_buffer *buffer);
58614-};
58615+} __no_const;
58616
58617 struct snd_info_entry_ops {
58618 int (*open)(struct snd_info_entry *entry,
58619diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
58620--- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
58621+++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
58622@@ -81,6 +81,7 @@ struct snd_pcm_ops {
58623 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
58624 int (*ack)(struct snd_pcm_substream *substream);
58625 };
58626+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
58627
58628 /*
58629 *
58630diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
58631--- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
58632+++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
58633@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
58634 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
58635 int (*csp_stop) (struct snd_sb_csp * p);
58636 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
58637-};
58638+} __no_const;
58639
58640 /*
58641 * CSP private data
58642diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
58643--- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
58644+++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
58645@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
58646
58647 /* platform stream ops */
58648 struct snd_pcm_ops *ops;
58649-};
58650+} __do_const;
58651
58652 struct snd_soc_platform {
58653 const char *name;
58654diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
58655--- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
58656+++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
58657@@ -358,7 +358,7 @@ struct snd_ymfpci {
58658 spinlock_t reg_lock;
58659 spinlock_t voice_lock;
58660 wait_queue_head_t interrupt_sleep;
58661- atomic_t interrupt_sleep_count;
58662+ atomic_unchecked_t interrupt_sleep_count;
58663 struct snd_info_entry *proc_entry;
58664 const struct firmware *dsp_microcode;
58665 const struct firmware *controller_microcode;
58666diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
58667--- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
58668+++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
58669@@ -364,7 +364,7 @@ struct t10_reservation_ops {
58670 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
58671 int (*t10_pr_register)(struct se_cmd *);
58672 int (*t10_pr_clear)(struct se_cmd *);
58673-};
58674+} __no_const;
58675
58676 struct t10_reservation_template {
58677 /* Reservation effects all target ports */
58678@@ -432,8 +432,8 @@ struct se_transport_task {
58679 atomic_t t_task_cdbs_left;
58680 atomic_t t_task_cdbs_ex_left;
58681 atomic_t t_task_cdbs_timeout_left;
58682- atomic_t t_task_cdbs_sent;
58683- atomic_t t_transport_aborted;
58684+ atomic_unchecked_t t_task_cdbs_sent;
58685+ atomic_unchecked_t t_transport_aborted;
58686 atomic_t t_transport_active;
58687 atomic_t t_transport_complete;
58688 atomic_t t_transport_queue_active;
58689@@ -774,7 +774,7 @@ struct se_device {
58690 atomic_t active_cmds;
58691 atomic_t simple_cmds;
58692 atomic_t depth_left;
58693- atomic_t dev_ordered_id;
58694+ atomic_unchecked_t dev_ordered_id;
58695 atomic_t dev_tur_active;
58696 atomic_t execute_tasks;
58697 atomic_t dev_status_thr_count;
58698diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
58699--- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
58700+++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
58701@@ -36,7 +36,7 @@ struct softirq_action;
58702 */
58703 TRACE_EVENT(irq_handler_entry,
58704
58705- TP_PROTO(int irq, struct irqaction *action),
58706+ TP_PROTO(int irq, const struct irqaction *action),
58707
58708 TP_ARGS(irq, action),
58709
58710@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
58711 */
58712 TRACE_EVENT(irq_handler_exit,
58713
58714- TP_PROTO(int irq, struct irqaction *action, int ret),
58715+ TP_PROTO(int irq, const struct irqaction *action, int ret),
58716
58717 TP_ARGS(irq, action, ret),
58718
58719diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
58720--- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
58721+++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
58722@@ -51,10 +51,10 @@ struct dlfb_data {
58723 int base8;
58724 u32 pseudo_palette[256];
58725 /* blit-only rendering path metrics, exposed through sysfs */
58726- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58727- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
58728- atomic_t bytes_sent; /* to usb, after compression including overhead */
58729- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
58730+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58731+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
58732+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
58733+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
58734 };
58735
58736 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
58737diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
58738--- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
58739+++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
58740@@ -177,6 +177,7 @@ struct uvesafb_par {
58741 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58742 u8 pmi_setpal; /* PMI for palette changes */
58743 u16 *pmi_base; /* protected mode interface location */
58744+ u8 *pmi_code; /* protected mode code location */
58745 void *pmi_start;
58746 void *pmi_pal;
58747 u8 *vbe_state_orig; /*
58748diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
58749--- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
58750+++ linux-3.0.4/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
58751@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
58752
58753 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58754 {
58755- int err = sys_mount(name, "/root", fs, flags, data);
58756+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58757 if (err)
58758 return err;
58759
58760@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
58761 va_start(args, fmt);
58762 vsprintf(buf, fmt, args);
58763 va_end(args);
58764- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58765+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58766 if (fd >= 0) {
58767 sys_ioctl(fd, FDEJECT, 0);
58768 sys_close(fd);
58769 }
58770 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58771- fd = sys_open("/dev/console", O_RDWR, 0);
58772+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58773 if (fd >= 0) {
58774 sys_ioctl(fd, TCGETS, (long)&termios);
58775 termios.c_lflag &= ~ICANON;
58776 sys_ioctl(fd, TCSETSF, (long)&termios);
58777- sys_read(fd, &c, 1);
58778+ sys_read(fd, (char __user *)&c, 1);
58779 termios.c_lflag |= ICANON;
58780 sys_ioctl(fd, TCSETSF, (long)&termios);
58781 sys_close(fd);
58782@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58783 mount_root();
58784 out:
58785 devtmpfs_mount("dev");
58786- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58787+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58788 sys_chroot((const char __user __force *)".");
58789 }
58790diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
58791--- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
58792+++ linux-3.0.4/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
58793@@ -15,15 +15,15 @@ extern int root_mountflags;
58794
58795 static inline int create_dev(char *name, dev_t dev)
58796 {
58797- sys_unlink(name);
58798- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58799+ sys_unlink((__force char __user *)name);
58800+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58801 }
58802
58803 #if BITS_PER_LONG == 32
58804 static inline u32 bstat(char *name)
58805 {
58806 struct stat64 stat;
58807- if (sys_stat64(name, &stat) != 0)
58808+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58809 return 0;
58810 if (!S_ISBLK(stat.st_mode))
58811 return 0;
58812diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
58813--- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
58814+++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
58815@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58816 create_dev("/dev/root.old", Root_RAM0);
58817 /* mount initrd on rootfs' /root */
58818 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58819- sys_mkdir("/old", 0700);
58820- root_fd = sys_open("/", 0, 0);
58821- old_fd = sys_open("/old", 0, 0);
58822+ sys_mkdir((__force const char __user *)"/old", 0700);
58823+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
58824+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58825 /* move initrd over / and chdir/chroot in initrd root */
58826- sys_chdir("/root");
58827- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58828- sys_chroot(".");
58829+ sys_chdir((__force const char __user *)"/root");
58830+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58831+ sys_chroot((__force const char __user *)".");
58832
58833 /*
58834 * In case that a resume from disk is carried out by linuxrc or one of
58835@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58836
58837 /* move initrd to rootfs' /old */
58838 sys_fchdir(old_fd);
58839- sys_mount("/", ".", NULL, MS_MOVE, NULL);
58840+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58841 /* switch root and cwd back to / of rootfs */
58842 sys_fchdir(root_fd);
58843- sys_chroot(".");
58844+ sys_chroot((__force const char __user *)".");
58845 sys_close(old_fd);
58846 sys_close(root_fd);
58847
58848 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58849- sys_chdir("/old");
58850+ sys_chdir((__force const char __user *)"/old");
58851 return;
58852 }
58853
58854@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58855 mount_root();
58856
58857 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58858- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58859+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58860 if (!error)
58861 printk("okay\n");
58862 else {
58863- int fd = sys_open("/dev/root.old", O_RDWR, 0);
58864+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58865 if (error == -ENOENT)
58866 printk("/initrd does not exist. Ignored.\n");
58867 else
58868 printk("failed\n");
58869 printk(KERN_NOTICE "Unmounting old root\n");
58870- sys_umount("/old", MNT_DETACH);
58871+ sys_umount((__force char __user *)"/old", MNT_DETACH);
58872 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58873 if (fd < 0) {
58874 error = fd;
58875@@ -116,11 +116,11 @@ int __init initrd_load(void)
58876 * mounted in the normal path.
58877 */
58878 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58879- sys_unlink("/initrd.image");
58880+ sys_unlink((__force const char __user *)"/initrd.image");
58881 handle_initrd();
58882 return 1;
58883 }
58884 }
58885- sys_unlink("/initrd.image");
58886+ sys_unlink((__force const char __user *)"/initrd.image");
58887 return 0;
58888 }
58889diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
58890--- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
58891+++ linux-3.0.4/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
58892@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58893 partitioned ? "_d" : "", minor,
58894 md_setup_args[ent].device_names);
58895
58896- fd = sys_open(name, 0, 0);
58897+ fd = sys_open((__force char __user *)name, 0, 0);
58898 if (fd < 0) {
58899 printk(KERN_ERR "md: open failed - cannot start "
58900 "array %s\n", name);
58901@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58902 * array without it
58903 */
58904 sys_close(fd);
58905- fd = sys_open(name, 0, 0);
58906+ fd = sys_open((__force char __user *)name, 0, 0);
58907 sys_ioctl(fd, BLKRRPART, 0);
58908 }
58909 sys_close(fd);
58910diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
58911--- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
58912+++ linux-3.0.4/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
58913@@ -74,7 +74,7 @@ static void __init free_hash(void)
58914 }
58915 }
58916
58917-static long __init do_utime(char __user *filename, time_t mtime)
58918+static long __init do_utime(__force char __user *filename, time_t mtime)
58919 {
58920 struct timespec t[2];
58921
58922@@ -109,7 +109,7 @@ static void __init dir_utime(void)
58923 struct dir_entry *de, *tmp;
58924 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58925 list_del(&de->list);
58926- do_utime(de->name, de->mtime);
58927+ do_utime((__force char __user *)de->name, de->mtime);
58928 kfree(de->name);
58929 kfree(de);
58930 }
58931@@ -271,7 +271,7 @@ static int __init maybe_link(void)
58932 if (nlink >= 2) {
58933 char *old = find_link(major, minor, ino, mode, collected);
58934 if (old)
58935- return (sys_link(old, collected) < 0) ? -1 : 1;
58936+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58937 }
58938 return 0;
58939 }
58940@@ -280,11 +280,11 @@ static void __init clean_path(char *path
58941 {
58942 struct stat st;
58943
58944- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58945+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58946 if (S_ISDIR(st.st_mode))
58947- sys_rmdir(path);
58948+ sys_rmdir((__force char __user *)path);
58949 else
58950- sys_unlink(path);
58951+ sys_unlink((__force char __user *)path);
58952 }
58953 }
58954
58955@@ -305,7 +305,7 @@ static int __init do_name(void)
58956 int openflags = O_WRONLY|O_CREAT;
58957 if (ml != 1)
58958 openflags |= O_TRUNC;
58959- wfd = sys_open(collected, openflags, mode);
58960+ wfd = sys_open((__force char __user *)collected, openflags, mode);
58961
58962 if (wfd >= 0) {
58963 sys_fchown(wfd, uid, gid);
58964@@ -317,17 +317,17 @@ static int __init do_name(void)
58965 }
58966 }
58967 } else if (S_ISDIR(mode)) {
58968- sys_mkdir(collected, mode);
58969- sys_chown(collected, uid, gid);
58970- sys_chmod(collected, mode);
58971+ sys_mkdir((__force char __user *)collected, mode);
58972+ sys_chown((__force char __user *)collected, uid, gid);
58973+ sys_chmod((__force char __user *)collected, mode);
58974 dir_add(collected, mtime);
58975 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58976 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58977 if (maybe_link() == 0) {
58978- sys_mknod(collected, mode, rdev);
58979- sys_chown(collected, uid, gid);
58980- sys_chmod(collected, mode);
58981- do_utime(collected, mtime);
58982+ sys_mknod((__force char __user *)collected, mode, rdev);
58983+ sys_chown((__force char __user *)collected, uid, gid);
58984+ sys_chmod((__force char __user *)collected, mode);
58985+ do_utime((__force char __user *)collected, mtime);
58986 }
58987 }
58988 return 0;
58989@@ -336,15 +336,15 @@ static int __init do_name(void)
58990 static int __init do_copy(void)
58991 {
58992 if (count >= body_len) {
58993- sys_write(wfd, victim, body_len);
58994+ sys_write(wfd, (__force char __user *)victim, body_len);
58995 sys_close(wfd);
58996- do_utime(vcollected, mtime);
58997+ do_utime((__force char __user *)vcollected, mtime);
58998 kfree(vcollected);
58999 eat(body_len);
59000 state = SkipIt;
59001 return 0;
59002 } else {
59003- sys_write(wfd, victim, count);
59004+ sys_write(wfd, (__force char __user *)victim, count);
59005 body_len -= count;
59006 eat(count);
59007 return 1;
59008@@ -355,9 +355,9 @@ static int __init do_symlink(void)
59009 {
59010 collected[N_ALIGN(name_len) + body_len] = '\0';
59011 clean_path(collected, 0);
59012- sys_symlink(collected + N_ALIGN(name_len), collected);
59013- sys_lchown(collected, uid, gid);
59014- do_utime(collected, mtime);
59015+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
59016+ sys_lchown((__force char __user *)collected, uid, gid);
59017+ do_utime((__force char __user *)collected, mtime);
59018 state = SkipIt;
59019 next_state = Reset;
59020 return 0;
59021diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
59022--- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
59023+++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
59024@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
59025
59026 config COMPAT_BRK
59027 bool "Disable heap randomization"
59028- default y
59029+ default n
59030 help
59031 Randomizing heap placement makes heap exploits harder, but it
59032 also breaks ancient binaries (including anything libc5 based).
59033diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
59034--- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
59035+++ linux-3.0.4/init/main.c 2011-08-23 21:48:14.000000000 -0400
59036@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
59037 extern void tc_init(void);
59038 #endif
59039
59040+extern void grsecurity_init(void);
59041+
59042 /*
59043 * Debug helper: via this flag we know that we are in 'early bootup code'
59044 * where only the boot processor is running with IRQ disabled. This means
59045@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
59046
59047 __setup("reset_devices", set_reset_devices);
59048
59049+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
59050+extern char pax_enter_kernel_user[];
59051+extern char pax_exit_kernel_user[];
59052+extern pgdval_t clone_pgd_mask;
59053+#endif
59054+
59055+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
59056+static int __init setup_pax_nouderef(char *str)
59057+{
59058+#ifdef CONFIG_X86_32
59059+ unsigned int cpu;
59060+ struct desc_struct *gdt;
59061+
59062+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
59063+ gdt = get_cpu_gdt_table(cpu);
59064+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
59065+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
59066+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
59067+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
59068+ }
59069+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59070+#else
59071+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59072+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59073+ clone_pgd_mask = ~(pgdval_t)0UL;
59074+#endif
59075+
59076+ return 0;
59077+}
59078+early_param("pax_nouderef", setup_pax_nouderef);
59079+#endif
59080+
59081+#ifdef CONFIG_PAX_SOFTMODE
59082+int pax_softmode;
59083+
59084+static int __init setup_pax_softmode(char *str)
59085+{
59086+ get_option(&str, &pax_softmode);
59087+ return 1;
59088+}
59089+__setup("pax_softmode=", setup_pax_softmode);
59090+#endif
59091+
59092 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59093 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59094 static const char *panic_later, *panic_param;
59095@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
59096 {
59097 int count = preempt_count();
59098 int ret;
59099+ const char *msg1 = "", *msg2 = "";
59100
59101 if (initcall_debug)
59102 ret = do_one_initcall_debug(fn);
59103@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
59104 sprintf(msgbuf, "error code %d ", ret);
59105
59106 if (preempt_count() != count) {
59107- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59108+ msg1 = " preemption imbalance";
59109 preempt_count() = count;
59110 }
59111 if (irqs_disabled()) {
59112- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59113+ msg2 = " disabled interrupts";
59114 local_irq_enable();
59115 }
59116- if (msgbuf[0]) {
59117- printk("initcall %pF returned with %s\n", fn, msgbuf);
59118+ if (msgbuf[0] || *msg1 || *msg2) {
59119+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59120 }
59121
59122 return ret;
59123@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
59124 do_basic_setup();
59125
59126 /* Open the /dev/console on the rootfs, this should never fail */
59127- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
59128+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
59129 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
59130
59131 (void) sys_dup(0);
59132@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
59133 if (!ramdisk_execute_command)
59134 ramdisk_execute_command = "/init";
59135
59136- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59137+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
59138 ramdisk_execute_command = NULL;
59139 prepare_namespace();
59140 }
59141
59142+ grsecurity_init();
59143+
59144 /*
59145 * Ok, we have completed the initial bootup, and
59146 * we're essentially up and running. Get rid of the
59147diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
59148--- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
59149+++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
59150@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
59151 mq_bytes = (mq_msg_tblsz +
59152 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59153
59154+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59155 spin_lock(&mq_lock);
59156 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59157 u->mq_bytes + mq_bytes >
59158diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
59159--- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
59160+++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
59161@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
59162 return security_msg_queue_associate(msq, msgflg);
59163 }
59164
59165+static struct ipc_ops msg_ops = {
59166+ .getnew = newque,
59167+ .associate = msg_security,
59168+ .more_checks = NULL
59169+};
59170+
59171 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
59172 {
59173 struct ipc_namespace *ns;
59174- struct ipc_ops msg_ops;
59175 struct ipc_params msg_params;
59176
59177 ns = current->nsproxy->ipc_ns;
59178
59179- msg_ops.getnew = newque;
59180- msg_ops.associate = msg_security;
59181- msg_ops.more_checks = NULL;
59182-
59183 msg_params.key = key;
59184 msg_params.flg = msgflg;
59185
59186diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
59187--- linux-3.0.4/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
59188+++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
59189@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
59190 return 0;
59191 }
59192
59193+static struct ipc_ops sem_ops = {
59194+ .getnew = newary,
59195+ .associate = sem_security,
59196+ .more_checks = sem_more_checks
59197+};
59198+
59199 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
59200 {
59201 struct ipc_namespace *ns;
59202- struct ipc_ops sem_ops;
59203 struct ipc_params sem_params;
59204
59205 ns = current->nsproxy->ipc_ns;
59206@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
59207 if (nsems < 0 || nsems > ns->sc_semmsl)
59208 return -EINVAL;
59209
59210- sem_ops.getnew = newary;
59211- sem_ops.associate = sem_security;
59212- sem_ops.more_checks = sem_more_checks;
59213-
59214 sem_params.key = key;
59215 sem_params.flg = semflg;
59216 sem_params.u.nsems = nsems;
59217@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
59218 int nsems;
59219 struct list_head tasks;
59220
59221+ pax_track_stack();
59222+
59223 sma = sem_lock_check(ns, semid);
59224 if (IS_ERR(sma))
59225 return PTR_ERR(sma);
59226@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
59227 struct ipc_namespace *ns;
59228 struct list_head tasks;
59229
59230+ pax_track_stack();
59231+
59232 ns = current->nsproxy->ipc_ns;
59233
59234 if (nsops < 1 || semid < 0)
59235diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
59236--- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
59237+++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
59238@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
59239 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
59240 #endif
59241
59242+#ifdef CONFIG_GRKERNSEC
59243+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59244+ const time_t shm_createtime, const uid_t cuid,
59245+ const int shmid);
59246+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59247+ const time_t shm_createtime);
59248+#endif
59249+
59250 void shm_init_ns(struct ipc_namespace *ns)
59251 {
59252 ns->shm_ctlmax = SHMMAX;
59253@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
59254 shp->shm_lprid = 0;
59255 shp->shm_atim = shp->shm_dtim = 0;
59256 shp->shm_ctim = get_seconds();
59257+#ifdef CONFIG_GRKERNSEC
59258+ {
59259+ struct timespec timeval;
59260+ do_posix_clock_monotonic_gettime(&timeval);
59261+
59262+ shp->shm_createtime = timeval.tv_sec;
59263+ }
59264+#endif
59265 shp->shm_segsz = size;
59266 shp->shm_nattch = 0;
59267 shp->shm_file = file;
59268@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
59269 return 0;
59270 }
59271
59272+static struct ipc_ops shm_ops = {
59273+ .getnew = newseg,
59274+ .associate = shm_security,
59275+ .more_checks = shm_more_checks
59276+};
59277+
59278 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
59279 {
59280 struct ipc_namespace *ns;
59281- struct ipc_ops shm_ops;
59282 struct ipc_params shm_params;
59283
59284 ns = current->nsproxy->ipc_ns;
59285
59286- shm_ops.getnew = newseg;
59287- shm_ops.associate = shm_security;
59288- shm_ops.more_checks = shm_more_checks;
59289-
59290 shm_params.key = key;
59291 shm_params.flg = shmflg;
59292 shm_params.u.size = size;
59293@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
59294 case SHM_LOCK:
59295 case SHM_UNLOCK:
59296 {
59297- struct file *uninitialized_var(shm_file);
59298-
59299 lru_add_drain_all(); /* drain pagevecs to lru lists */
59300
59301 shp = shm_lock_check(ns, shmid);
59302@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
59303 if (err)
59304 goto out_unlock;
59305
59306+#ifdef CONFIG_GRKERNSEC
59307+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
59308+ shp->shm_perm.cuid, shmid) ||
59309+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
59310+ err = -EACCES;
59311+ goto out_unlock;
59312+ }
59313+#endif
59314+
59315 path = shp->shm_file->f_path;
59316 path_get(&path);
59317 shp->shm_nattch++;
59318+#ifdef CONFIG_GRKERNSEC
59319+ shp->shm_lapid = current->pid;
59320+#endif
59321 size = i_size_read(path.dentry->d_inode);
59322 shm_unlock(shp);
59323
59324diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
59325--- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
59326+++ linux-3.0.4/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
59327@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
59328 */
59329 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
59330 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
59331- file->f_op->write(file, (char *)&ac,
59332+ file->f_op->write(file, (__force char __user *)&ac,
59333 sizeof(acct_t), &file->f_pos);
59334 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
59335 set_fs(fs);
59336diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
59337--- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
59338+++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
59339@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
59340 3) suppressed due to audit_rate_limit
59341 4) suppressed due to audit_backlog_limit
59342 */
59343-static atomic_t audit_lost = ATOMIC_INIT(0);
59344+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
59345
59346 /* The netlink socket. */
59347 static struct sock *audit_sock;
59348@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
59349 unsigned long now;
59350 int print;
59351
59352- atomic_inc(&audit_lost);
59353+ atomic_inc_unchecked(&audit_lost);
59354
59355 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
59356
59357@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
59358 printk(KERN_WARNING
59359 "audit: audit_lost=%d audit_rate_limit=%d "
59360 "audit_backlog_limit=%d\n",
59361- atomic_read(&audit_lost),
59362+ atomic_read_unchecked(&audit_lost),
59363 audit_rate_limit,
59364 audit_backlog_limit);
59365 audit_panic(message);
59366@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
59367 status_set.pid = audit_pid;
59368 status_set.rate_limit = audit_rate_limit;
59369 status_set.backlog_limit = audit_backlog_limit;
59370- status_set.lost = atomic_read(&audit_lost);
59371+ status_set.lost = atomic_read_unchecked(&audit_lost);
59372 status_set.backlog = skb_queue_len(&audit_skb_queue);
59373 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
59374 &status_set, sizeof(status_set));
59375diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
59376--- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
59377+++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
59378@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
59379 }
59380
59381 /* global counter which is incremented every time something logs in */
59382-static atomic_t session_id = ATOMIC_INIT(0);
59383+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
59384
59385 /**
59386 * audit_set_loginuid - set a task's audit_context loginuid
59387@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
59388 */
59389 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
59390 {
59391- unsigned int sessionid = atomic_inc_return(&session_id);
59392+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
59393 struct audit_context *context = task->audit_context;
59394
59395 if (context && context->in_syscall) {
59396diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
59397--- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
59398+++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
59399@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
59400 * before modification is attempted and the application
59401 * fails.
59402 */
59403+ if (tocopy > ARRAY_SIZE(kdata))
59404+ return -EFAULT;
59405+
59406 if (copy_to_user(dataptr, kdata, tocopy
59407 * sizeof(struct __user_cap_data_struct))) {
59408 return -EFAULT;
59409@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
59410 BUG();
59411 }
59412
59413- if (security_capable(ns, current_cred(), cap) == 0) {
59414+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
59415 current->flags |= PF_SUPERPRIV;
59416 return true;
59417 }
59418@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
59419 }
59420 EXPORT_SYMBOL(ns_capable);
59421
59422+bool ns_capable_nolog(struct user_namespace *ns, int cap)
59423+{
59424+ if (unlikely(!cap_valid(cap))) {
59425+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
59426+ BUG();
59427+ }
59428+
59429+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
59430+ current->flags |= PF_SUPERPRIV;
59431+ return true;
59432+ }
59433+ return false;
59434+}
59435+EXPORT_SYMBOL(ns_capable_nolog);
59436+
59437+bool capable_nolog(int cap)
59438+{
59439+ return ns_capable_nolog(&init_user_ns, cap);
59440+}
59441+EXPORT_SYMBOL(capable_nolog);
59442+
59443 /**
59444 * task_ns_capable - Determine whether current task has a superior
59445 * capability targeted at a specific task's user namespace.
59446@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
59447 }
59448 EXPORT_SYMBOL(task_ns_capable);
59449
59450+bool task_ns_capable_nolog(struct task_struct *t, int cap)
59451+{
59452+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
59453+}
59454+EXPORT_SYMBOL(task_ns_capable_nolog);
59455+
59456 /**
59457 * nsown_capable - Check superior capability to one's own user_ns
59458 * @cap: The capability in question
59459diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
59460--- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
59461+++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
59462@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
59463 struct hlist_head *hhead;
59464 struct cg_cgroup_link *link;
59465
59466+ pax_track_stack();
59467+
59468 /* First see if we already have a cgroup group that matches
59469 * the desired set */
59470 read_lock(&css_set_lock);
59471diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
59472--- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
59473+++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
59474@@ -13,6 +13,7 @@
59475
59476 #include <linux/linkage.h>
59477 #include <linux/compat.h>
59478+#include <linux/module.h>
59479 #include <linux/errno.h>
59480 #include <linux/time.h>
59481 #include <linux/signal.h>
59482diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
59483--- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
59484+++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
59485@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
59486 struct proc_dir_entry *entry;
59487
59488 /* create the current config file */
59489+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59490+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
59491+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
59492+ &ikconfig_file_ops);
59493+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59494+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
59495+ &ikconfig_file_ops);
59496+#endif
59497+#else
59498 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
59499 &ikconfig_file_ops);
59500+#endif
59501+
59502 if (!entry)
59503 return -ENOMEM;
59504
59505diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
59506--- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
59507+++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
59508@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
59509 */
59510 void __put_cred(struct cred *cred)
59511 {
59512+ pax_track_stack();
59513+
59514 kdebug("__put_cred(%p{%d,%d})", cred,
59515 atomic_read(&cred->usage),
59516 read_cred_subscribers(cred));
59517@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
59518 {
59519 struct cred *cred;
59520
59521+ pax_track_stack();
59522+
59523 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59524 atomic_read(&tsk->cred->usage),
59525 read_cred_subscribers(tsk->cred));
59526@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
59527 {
59528 const struct cred *cred;
59529
59530+ pax_track_stack();
59531+
59532 rcu_read_lock();
59533
59534 do {
59535@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
59536 {
59537 struct cred *new;
59538
59539+ pax_track_stack();
59540+
59541 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59542 if (!new)
59543 return NULL;
59544@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
59545 const struct cred *old;
59546 struct cred *new;
59547
59548+ pax_track_stack();
59549+
59550 validate_process_creds();
59551
59552 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59553@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
59554 struct thread_group_cred *tgcred = NULL;
59555 struct cred *new;
59556
59557+ pax_track_stack();
59558+
59559 #ifdef CONFIG_KEYS
59560 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59561 if (!tgcred)
59562@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
59563 struct cred *new;
59564 int ret;
59565
59566+ pax_track_stack();
59567+
59568 if (
59569 #ifdef CONFIG_KEYS
59570 !p->cred->thread_keyring &&
59571@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
59572 struct task_struct *task = current;
59573 const struct cred *old = task->real_cred;
59574
59575+ pax_track_stack();
59576+
59577 kdebug("commit_creds(%p{%d,%d})", new,
59578 atomic_read(&new->usage),
59579 read_cred_subscribers(new));
59580@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
59581
59582 get_cred(new); /* we will require a ref for the subj creds too */
59583
59584+ gr_set_role_label(task, new->uid, new->gid);
59585+
59586 /* dumpability changes */
59587 if (old->euid != new->euid ||
59588 old->egid != new->egid ||
59589@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
59590 key_fsgid_changed(task);
59591
59592 /* do it
59593- * - What if a process setreuid()'s and this brings the
59594- * new uid over his NPROC rlimit? We can check this now
59595- * cheaply with the new uid cache, so if it matters
59596- * we should be checking for it. -DaveM
59597+ * RLIMIT_NPROC limits on user->processes have already been checked
59598+ * in set_user().
59599 */
59600 alter_cred_subscribers(new, 2);
59601 if (new->user != old->user)
59602@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
59603 */
59604 void abort_creds(struct cred *new)
59605 {
59606+ pax_track_stack();
59607+
59608 kdebug("abort_creds(%p{%d,%d})", new,
59609 atomic_read(&new->usage),
59610 read_cred_subscribers(new));
59611@@ -574,6 +592,8 @@ const struct cred *override_creds(const
59612 {
59613 const struct cred *old = current->cred;
59614
59615+ pax_track_stack();
59616+
59617 kdebug("override_creds(%p{%d,%d})", new,
59618 atomic_read(&new->usage),
59619 read_cred_subscribers(new));
59620@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
59621 {
59622 const struct cred *override = current->cred;
59623
59624+ pax_track_stack();
59625+
59626 kdebug("revert_creds(%p{%d,%d})", old,
59627 atomic_read(&old->usage),
59628 read_cred_subscribers(old));
59629@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
59630 const struct cred *old;
59631 struct cred *new;
59632
59633+ pax_track_stack();
59634+
59635 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59636 if (!new)
59637 return NULL;
59638@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59639 */
59640 int set_security_override(struct cred *new, u32 secid)
59641 {
59642+ pax_track_stack();
59643+
59644 return security_kernel_act_as(new, secid);
59645 }
59646 EXPORT_SYMBOL(set_security_override);
59647@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
59648 u32 secid;
59649 int ret;
59650
59651+ pax_track_stack();
59652+
59653 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59654 if (ret < 0)
59655 return ret;
59656diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
59657--- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
59658+++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
59659@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
59660 */
59661 static atomic_t masters_in_kgdb;
59662 static atomic_t slaves_in_kgdb;
59663-static atomic_t kgdb_break_tasklet_var;
59664+static atomic_unchecked_t kgdb_break_tasklet_var;
59665 atomic_t kgdb_setting_breakpoint;
59666
59667 struct task_struct *kgdb_usethread;
59668@@ -129,7 +129,7 @@ int kgdb_single_step;
59669 static pid_t kgdb_sstep_pid;
59670
59671 /* to keep track of the CPU which is doing the single stepping*/
59672-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59673+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59674
59675 /*
59676 * If you are debugging a problem where roundup (the collection of
59677@@ -542,7 +542,7 @@ return_normal:
59678 * kernel will only try for the value of sstep_tries before
59679 * giving up and continuing on.
59680 */
59681- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59682+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59683 (kgdb_info[cpu].task &&
59684 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
59685 atomic_set(&kgdb_active, -1);
59686@@ -636,8 +636,8 @@ cpu_master_loop:
59687 }
59688
59689 kgdb_restore:
59690- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
59691- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
59692+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
59693+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
59694 if (kgdb_info[sstep_cpu].task)
59695 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
59696 else
59697@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
59698 static void kgdb_tasklet_bpt(unsigned long ing)
59699 {
59700 kgdb_breakpoint();
59701- atomic_set(&kgdb_break_tasklet_var, 0);
59702+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
59703 }
59704
59705 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
59706
59707 void kgdb_schedule_breakpoint(void)
59708 {
59709- if (atomic_read(&kgdb_break_tasklet_var) ||
59710+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
59711 atomic_read(&kgdb_active) != -1 ||
59712 atomic_read(&kgdb_setting_breakpoint))
59713 return;
59714- atomic_inc(&kgdb_break_tasklet_var);
59715+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
59716 tasklet_schedule(&kgdb_tasklet_breakpoint);
59717 }
59718 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
59719diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
59720--- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
59721+++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
59722@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
59723 list_for_each_entry(mod, kdb_modules, list) {
59724
59725 kdb_printf("%-20s%8u 0x%p ", mod->name,
59726- mod->core_size, (void *)mod);
59727+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
59728 #ifdef CONFIG_MODULE_UNLOAD
59729 kdb_printf("%4d ", module_refcount(mod));
59730 #endif
59731@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
59732 kdb_printf(" (Loading)");
59733 else
59734 kdb_printf(" (Live)");
59735- kdb_printf(" 0x%p", mod->module_core);
59736+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
59737
59738 #ifdef CONFIG_MODULE_UNLOAD
59739 {
59740diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
59741--- linux-3.0.4/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
59742+++ linux-3.0.4/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
59743@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
59744 return 0;
59745 }
59746
59747-static atomic64_t perf_event_id;
59748+static atomic64_unchecked_t perf_event_id;
59749
59750 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
59751 enum event_type_t event_type);
59752@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
59753
59754 static inline u64 perf_event_count(struct perf_event *event)
59755 {
59756- return local64_read(&event->count) + atomic64_read(&event->child_count);
59757+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
59758 }
59759
59760 static u64 perf_event_read(struct perf_event *event)
59761@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
59762 mutex_lock(&event->child_mutex);
59763 total += perf_event_read(event);
59764 *enabled += event->total_time_enabled +
59765- atomic64_read(&event->child_total_time_enabled);
59766+ atomic64_read_unchecked(&event->child_total_time_enabled);
59767 *running += event->total_time_running +
59768- atomic64_read(&event->child_total_time_running);
59769+ atomic64_read_unchecked(&event->child_total_time_running);
59770
59771 list_for_each_entry(child, &event->child_list, child_list) {
59772 total += perf_event_read(child);
59773@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
59774 userpg->offset -= local64_read(&event->hw.prev_count);
59775
59776 userpg->time_enabled = event->total_time_enabled +
59777- atomic64_read(&event->child_total_time_enabled);
59778+ atomic64_read_unchecked(&event->child_total_time_enabled);
59779
59780 userpg->time_running = event->total_time_running +
59781- atomic64_read(&event->child_total_time_running);
59782+ atomic64_read_unchecked(&event->child_total_time_running);
59783
59784 barrier();
59785 ++userpg->lock;
59786@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
59787 values[n++] = perf_event_count(event);
59788 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
59789 values[n++] = enabled +
59790- atomic64_read(&event->child_total_time_enabled);
59791+ atomic64_read_unchecked(&event->child_total_time_enabled);
59792 }
59793 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
59794 values[n++] = running +
59795- atomic64_read(&event->child_total_time_running);
59796+ atomic64_read_unchecked(&event->child_total_time_running);
59797 }
59798 if (read_format & PERF_FORMAT_ID)
59799 values[n++] = primary_event_id(event);
59800@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
59801 * need to add enough zero bytes after the string to handle
59802 * the 64bit alignment we do later.
59803 */
59804- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
59805+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
59806 if (!buf) {
59807 name = strncpy(tmp, "//enomem", sizeof(tmp));
59808 goto got_name;
59809 }
59810- name = d_path(&file->f_path, buf, PATH_MAX);
59811+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
59812 if (IS_ERR(name)) {
59813 name = strncpy(tmp, "//toolong", sizeof(tmp));
59814 goto got_name;
59815@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
59816 event->parent = parent_event;
59817
59818 event->ns = get_pid_ns(current->nsproxy->pid_ns);
59819- event->id = atomic64_inc_return(&perf_event_id);
59820+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
59821
59822 event->state = PERF_EVENT_STATE_INACTIVE;
59823
59824@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
59825 /*
59826 * Add back the child's count to the parent's count:
59827 */
59828- atomic64_add(child_val, &parent_event->child_count);
59829- atomic64_add(child_event->total_time_enabled,
59830+ atomic64_add_unchecked(child_val, &parent_event->child_count);
59831+ atomic64_add_unchecked(child_event->total_time_enabled,
59832 &parent_event->child_total_time_enabled);
59833- atomic64_add(child_event->total_time_running,
59834+ atomic64_add_unchecked(child_event->total_time_running,
59835 &parent_event->child_total_time_running);
59836
59837 /*
59838diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
59839--- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
59840+++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
59841@@ -57,6 +57,10 @@
59842 #include <asm/pgtable.h>
59843 #include <asm/mmu_context.h>
59844
59845+#ifdef CONFIG_GRKERNSEC
59846+extern rwlock_t grsec_exec_file_lock;
59847+#endif
59848+
59849 static void exit_mm(struct task_struct * tsk);
59850
59851 static void __unhash_process(struct task_struct *p, bool group_dead)
59852@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
59853 struct task_struct *leader;
59854 int zap_leader;
59855 repeat:
59856+#ifdef CONFIG_NET
59857+ gr_del_task_from_ip_table(p);
59858+#endif
59859+
59860 tracehook_prepare_release_task(p);
59861 /* don't need to get the RCU readlock here - the process is dead and
59862 * can't be modifying its own credentials. But shut RCU-lockdep up */
59863@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
59864 {
59865 write_lock_irq(&tasklist_lock);
59866
59867+#ifdef CONFIG_GRKERNSEC
59868+ write_lock(&grsec_exec_file_lock);
59869+ if (current->exec_file) {
59870+ fput(current->exec_file);
59871+ current->exec_file = NULL;
59872+ }
59873+ write_unlock(&grsec_exec_file_lock);
59874+#endif
59875+
59876 ptrace_unlink(current);
59877 /* Reparent to init */
59878 current->real_parent = current->parent = kthreadd_task;
59879 list_move_tail(&current->sibling, &current->real_parent->children);
59880
59881+ gr_set_kernel_label(current);
59882+
59883 /* Set the exit signal to SIGCHLD so we signal init on exit */
59884 current->exit_signal = SIGCHLD;
59885
59886@@ -394,7 +413,7 @@ int allow_signal(int sig)
59887 * know it'll be handled, so that they don't get converted to
59888 * SIGKILL or just silently dropped.
59889 */
59890- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59891+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59892 recalc_sigpending();
59893 spin_unlock_irq(&current->sighand->siglock);
59894 return 0;
59895@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
59896 vsnprintf(current->comm, sizeof(current->comm), name, args);
59897 va_end(args);
59898
59899+#ifdef CONFIG_GRKERNSEC
59900+ write_lock(&grsec_exec_file_lock);
59901+ if (current->exec_file) {
59902+ fput(current->exec_file);
59903+ current->exec_file = NULL;
59904+ }
59905+ write_unlock(&grsec_exec_file_lock);
59906+#endif
59907+
59908+ gr_set_kernel_label(current);
59909+
59910 /*
59911 * If we were started as result of loading a module, close all of the
59912 * user space pages. We don't need them, and if we didn't close them
59913@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
59914 struct task_struct *tsk = current;
59915 int group_dead;
59916
59917- profile_task_exit(tsk);
59918-
59919- WARN_ON(atomic_read(&tsk->fs_excl));
59920- WARN_ON(blk_needs_flush_plug(tsk));
59921-
59922 if (unlikely(in_interrupt()))
59923 panic("Aiee, killing interrupt handler!");
59924- if (unlikely(!tsk->pid))
59925- panic("Attempted to kill the idle task!");
59926
59927 /*
59928 * If do_exit is called because this processes oopsed, it's possible
59929@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
59930 */
59931 set_fs(USER_DS);
59932
59933+ profile_task_exit(tsk);
59934+
59935+ WARN_ON(atomic_read(&tsk->fs_excl));
59936+ WARN_ON(blk_needs_flush_plug(tsk));
59937+
59938+ if (unlikely(!tsk->pid))
59939+ panic("Attempted to kill the idle task!");
59940+
59941 tracehook_report_exit(&code);
59942
59943 validate_creds_for_do_exit(tsk);
59944@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
59945 tsk->exit_code = code;
59946 taskstats_exit(tsk, group_dead);
59947
59948+ gr_acl_handle_psacct(tsk, code);
59949+ gr_acl_handle_exit();
59950+
59951 exit_mm(tsk);
59952
59953 if (group_dead)
59954diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
59955--- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
59956+++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
59957@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
59958 *stackend = STACK_END_MAGIC; /* for overflow detection */
59959
59960 #ifdef CONFIG_CC_STACKPROTECTOR
59961- tsk->stack_canary = get_random_int();
59962+ tsk->stack_canary = pax_get_random_long();
59963 #endif
59964
59965 /* One for us, one for whoever does the "release_task()" (usually parent) */
59966@@ -308,13 +308,77 @@ out:
59967 }
59968
59969 #ifdef CONFIG_MMU
59970+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
59971+{
59972+ struct vm_area_struct *tmp;
59973+ unsigned long charge;
59974+ struct mempolicy *pol;
59975+ struct file *file;
59976+
59977+ charge = 0;
59978+ if (mpnt->vm_flags & VM_ACCOUNT) {
59979+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59980+ if (security_vm_enough_memory(len))
59981+ goto fail_nomem;
59982+ charge = len;
59983+ }
59984+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59985+ if (!tmp)
59986+ goto fail_nomem;
59987+ *tmp = *mpnt;
59988+ tmp->vm_mm = mm;
59989+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
59990+ pol = mpol_dup(vma_policy(mpnt));
59991+ if (IS_ERR(pol))
59992+ goto fail_nomem_policy;
59993+ vma_set_policy(tmp, pol);
59994+ if (anon_vma_fork(tmp, mpnt))
59995+ goto fail_nomem_anon_vma_fork;
59996+ tmp->vm_flags &= ~VM_LOCKED;
59997+ tmp->vm_next = tmp->vm_prev = NULL;
59998+ tmp->vm_mirror = NULL;
59999+ file = tmp->vm_file;
60000+ if (file) {
60001+ struct inode *inode = file->f_path.dentry->d_inode;
60002+ struct address_space *mapping = file->f_mapping;
60003+
60004+ get_file(file);
60005+ if (tmp->vm_flags & VM_DENYWRITE)
60006+ atomic_dec(&inode->i_writecount);
60007+ mutex_lock(&mapping->i_mmap_mutex);
60008+ if (tmp->vm_flags & VM_SHARED)
60009+ mapping->i_mmap_writable++;
60010+ flush_dcache_mmap_lock(mapping);
60011+ /* insert tmp into the share list, just after mpnt */
60012+ vma_prio_tree_add(tmp, mpnt);
60013+ flush_dcache_mmap_unlock(mapping);
60014+ mutex_unlock(&mapping->i_mmap_mutex);
60015+ }
60016+
60017+ /*
60018+ * Clear hugetlb-related page reserves for children. This only
60019+ * affects MAP_PRIVATE mappings. Faults generated by the child
60020+ * are not guaranteed to succeed, even if read-only
60021+ */
60022+ if (is_vm_hugetlb_page(tmp))
60023+ reset_vma_resv_huge_pages(tmp);
60024+
60025+ return tmp;
60026+
60027+fail_nomem_anon_vma_fork:
60028+ mpol_put(pol);
60029+fail_nomem_policy:
60030+ kmem_cache_free(vm_area_cachep, tmp);
60031+fail_nomem:
60032+ vm_unacct_memory(charge);
60033+ return NULL;
60034+}
60035+
60036 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
60037 {
60038 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
60039 struct rb_node **rb_link, *rb_parent;
60040 int retval;
60041- unsigned long charge;
60042- struct mempolicy *pol;
60043
60044 down_write(&oldmm->mmap_sem);
60045 flush_cache_dup_mm(oldmm);
60046@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
60047 mm->locked_vm = 0;
60048 mm->mmap = NULL;
60049 mm->mmap_cache = NULL;
60050- mm->free_area_cache = oldmm->mmap_base;
60051- mm->cached_hole_size = ~0UL;
60052+ mm->free_area_cache = oldmm->free_area_cache;
60053+ mm->cached_hole_size = oldmm->cached_hole_size;
60054 mm->map_count = 0;
60055 cpumask_clear(mm_cpumask(mm));
60056 mm->mm_rb = RB_ROOT;
60057@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
60058
60059 prev = NULL;
60060 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
60061- struct file *file;
60062-
60063 if (mpnt->vm_flags & VM_DONTCOPY) {
60064 long pages = vma_pages(mpnt);
60065 mm->total_vm -= pages;
60066@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
60067 -pages);
60068 continue;
60069 }
60070- charge = 0;
60071- if (mpnt->vm_flags & VM_ACCOUNT) {
60072- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
60073- if (security_vm_enough_memory(len))
60074- goto fail_nomem;
60075- charge = len;
60076- }
60077- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
60078- if (!tmp)
60079- goto fail_nomem;
60080- *tmp = *mpnt;
60081- INIT_LIST_HEAD(&tmp->anon_vma_chain);
60082- pol = mpol_dup(vma_policy(mpnt));
60083- retval = PTR_ERR(pol);
60084- if (IS_ERR(pol))
60085- goto fail_nomem_policy;
60086- vma_set_policy(tmp, pol);
60087- tmp->vm_mm = mm;
60088- if (anon_vma_fork(tmp, mpnt))
60089- goto fail_nomem_anon_vma_fork;
60090- tmp->vm_flags &= ~VM_LOCKED;
60091- tmp->vm_next = tmp->vm_prev = NULL;
60092- file = tmp->vm_file;
60093- if (file) {
60094- struct inode *inode = file->f_path.dentry->d_inode;
60095- struct address_space *mapping = file->f_mapping;
60096-
60097- get_file(file);
60098- if (tmp->vm_flags & VM_DENYWRITE)
60099- atomic_dec(&inode->i_writecount);
60100- mutex_lock(&mapping->i_mmap_mutex);
60101- if (tmp->vm_flags & VM_SHARED)
60102- mapping->i_mmap_writable++;
60103- flush_dcache_mmap_lock(mapping);
60104- /* insert tmp into the share list, just after mpnt */
60105- vma_prio_tree_add(tmp, mpnt);
60106- flush_dcache_mmap_unlock(mapping);
60107- mutex_unlock(&mapping->i_mmap_mutex);
60108+ tmp = dup_vma(mm, mpnt);
60109+ if (!tmp) {
60110+ retval = -ENOMEM;
60111+ goto out;
60112 }
60113
60114 /*
60115- * Clear hugetlb-related page reserves for children. This only
60116- * affects MAP_PRIVATE mappings. Faults generated by the child
60117- * are not guaranteed to succeed, even if read-only
60118- */
60119- if (is_vm_hugetlb_page(tmp))
60120- reset_vma_resv_huge_pages(tmp);
60121-
60122- /*
60123 * Link in the new vma and copy the page table entries.
60124 */
60125 *pprev = tmp;
60126@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
60127 if (retval)
60128 goto out;
60129 }
60130+
60131+#ifdef CONFIG_PAX_SEGMEXEC
60132+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
60133+ struct vm_area_struct *mpnt_m;
60134+
60135+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
60136+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
60137+
60138+ if (!mpnt->vm_mirror)
60139+ continue;
60140+
60141+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
60142+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
60143+ mpnt->vm_mirror = mpnt_m;
60144+ } else {
60145+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
60146+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
60147+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
60148+ mpnt->vm_mirror->vm_mirror = mpnt;
60149+ }
60150+ }
60151+ BUG_ON(mpnt_m);
60152+ }
60153+#endif
60154+
60155 /* a new mm has just been created */
60156 arch_dup_mmap(oldmm, mm);
60157 retval = 0;
60158@@ -429,14 +474,6 @@ out:
60159 flush_tlb_mm(oldmm);
60160 up_write(&oldmm->mmap_sem);
60161 return retval;
60162-fail_nomem_anon_vma_fork:
60163- mpol_put(pol);
60164-fail_nomem_policy:
60165- kmem_cache_free(vm_area_cachep, tmp);
60166-fail_nomem:
60167- retval = -ENOMEM;
60168- vm_unacct_memory(charge);
60169- goto out;
60170 }
60171
60172 static inline int mm_alloc_pgd(struct mm_struct * mm)
60173@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
60174 spin_unlock(&fs->lock);
60175 return -EAGAIN;
60176 }
60177- fs->users++;
60178+ atomic_inc(&fs->users);
60179 spin_unlock(&fs->lock);
60180 return 0;
60181 }
60182 tsk->fs = copy_fs_struct(fs);
60183 if (!tsk->fs)
60184 return -ENOMEM;
60185+ gr_set_chroot_entries(tsk, &tsk->fs->root);
60186 return 0;
60187 }
60188
60189@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
60190 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
60191 #endif
60192 retval = -EAGAIN;
60193+
60194+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
60195+
60196 if (atomic_read(&p->real_cred->user->processes) >=
60197 task_rlimit(p, RLIMIT_NPROC)) {
60198- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
60199- p->real_cred->user != INIT_USER)
60200+ if (p->real_cred->user != INIT_USER &&
60201+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
60202 goto bad_fork_free;
60203 }
60204+ current->flags &= ~PF_NPROC_EXCEEDED;
60205
60206 retval = copy_creds(p, clone_flags);
60207 if (retval < 0)
60208@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
60209 if (clone_flags & CLONE_THREAD)
60210 p->tgid = current->tgid;
60211
60212+ gr_copy_label(p);
60213+
60214 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
60215 /*
60216 * Clear TID on mm_release()?
60217@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
60218 bad_fork_free:
60219 free_task(p);
60220 fork_out:
60221+ gr_log_forkfail(retval);
60222+
60223 return ERR_PTR(retval);
60224 }
60225
60226@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
60227 if (clone_flags & CLONE_PARENT_SETTID)
60228 put_user(nr, parent_tidptr);
60229
60230+ gr_handle_brute_check();
60231+
60232 if (clone_flags & CLONE_VFORK) {
60233 p->vfork_done = &vfork;
60234 init_completion(&vfork);
60235@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
60236 return 0;
60237
60238 /* don't need lock here; in the worst case we'll do useless copy */
60239- if (fs->users == 1)
60240+ if (atomic_read(&fs->users) == 1)
60241 return 0;
60242
60243 *new_fsp = copy_fs_struct(fs);
60244@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
60245 fs = current->fs;
60246 spin_lock(&fs->lock);
60247 current->fs = new_fs;
60248- if (--fs->users)
60249+ gr_set_chroot_entries(current, &current->fs->root);
60250+ if (atomic_dec_return(&fs->users))
60251 new_fs = NULL;
60252 else
60253 new_fs = fs;
60254diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
60255--- linux-3.0.4/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
60256+++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
60257@@ -54,6 +54,7 @@
60258 #include <linux/mount.h>
60259 #include <linux/pagemap.h>
60260 #include <linux/syscalls.h>
60261+#include <linux/ptrace.h>
60262 #include <linux/signal.h>
60263 #include <linux/module.h>
60264 #include <linux/magic.h>
60265@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
60266 struct page *page, *page_head;
60267 int err, ro = 0;
60268
60269+#ifdef CONFIG_PAX_SEGMEXEC
60270+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
60271+ return -EFAULT;
60272+#endif
60273+
60274 /*
60275 * The futex address must be "naturally" aligned.
60276 */
60277@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
60278 struct futex_q q = futex_q_init;
60279 int ret;
60280
60281+ pax_track_stack();
60282+
60283 if (!bitset)
60284 return -EINVAL;
60285 q.bitset = bitset;
60286@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
60287 struct futex_q q = futex_q_init;
60288 int res, ret;
60289
60290+ pax_track_stack();
60291+
60292 if (!bitset)
60293 return -EINVAL;
60294
60295@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60296 {
60297 struct robust_list_head __user *head;
60298 unsigned long ret;
60299+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60300 const struct cred *cred = current_cred(), *pcred;
60301+#endif
60302
60303 if (!futex_cmpxchg_enabled)
60304 return -ENOSYS;
60305@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60306 if (!p)
60307 goto err_unlock;
60308 ret = -EPERM;
60309+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60310+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
60311+ goto err_unlock;
60312+#else
60313 pcred = __task_cred(p);
60314 /* If victim is in different user_ns, then uids are not
60315 comparable, so we must have CAP_SYS_PTRACE */
60316@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60317 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
60318 goto err_unlock;
60319 ok:
60320+#endif
60321 head = p->robust_list;
60322 rcu_read_unlock();
60323 }
60324@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
60325 {
60326 u32 curval;
60327 int i;
60328+ mm_segment_t oldfs;
60329
60330 /*
60331 * This will fail and we want it. Some arch implementations do
60332@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
60333 * implementation, the non-functional ones will return
60334 * -ENOSYS.
60335 */
60336+ oldfs = get_fs();
60337+ set_fs(USER_DS);
60338 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
60339 futex_cmpxchg_enabled = 1;
60340+ set_fs(oldfs);
60341
60342 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
60343 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
60344diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
60345--- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
60346+++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
60347@@ -10,6 +10,7 @@
60348 #include <linux/compat.h>
60349 #include <linux/nsproxy.h>
60350 #include <linux/futex.h>
60351+#include <linux/ptrace.h>
60352
60353 #include <asm/uaccess.h>
60354
60355@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
60356 {
60357 struct compat_robust_list_head __user *head;
60358 unsigned long ret;
60359- const struct cred *cred = current_cred(), *pcred;
60360+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60361+ const struct cred *cred = current_cred();
60362+ const struct cred *pcred;
60363+#endif
60364
60365 if (!futex_cmpxchg_enabled)
60366 return -ENOSYS;
60367@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
60368 if (!p)
60369 goto err_unlock;
60370 ret = -EPERM;
60371+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60372+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
60373+ goto err_unlock;
60374+#else
60375 pcred = __task_cred(p);
60376 /* If victim is in different user_ns, then uids are not
60377 comparable, so we must have CAP_SYS_PTRACE */
60378@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
60379 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
60380 goto err_unlock;
60381 ok:
60382+#endif
60383 head = p->compat_robust_list;
60384 rcu_read_unlock();
60385 }
60386diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
60387--- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
60388+++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
60389@@ -102,11 +102,6 @@ void gcov_enable_events(void)
60390 }
60391
60392 #ifdef CONFIG_MODULES
60393-static inline int within(void *addr, void *start, unsigned long size)
60394-{
60395- return ((addr >= start) && (addr < start + size));
60396-}
60397-
60398 /* Update list and generate events when modules are unloaded. */
60399 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
60400 void *data)
60401@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
60402 prev = NULL;
60403 /* Remove entries located in module from linked list. */
60404 for (info = gcov_info_head; info; info = info->next) {
60405- if (within(info, mod->module_core, mod->core_size)) {
60406+ if (within_module_core_rw((unsigned long)info, mod)) {
60407 if (prev)
60408 prev->next = info->next;
60409 else
60410diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
60411--- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
60412+++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
60413@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
60414 local_irq_restore(flags);
60415 }
60416
60417-static void run_hrtimer_softirq(struct softirq_action *h)
60418+static void run_hrtimer_softirq(void)
60419 {
60420 hrtimer_peek_ahead_timers();
60421 }
60422diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
60423--- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
60424+++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
60425@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
60426
60427 size = (((unsigned long)stop - (unsigned long)start)
60428 / sizeof(struct jump_entry));
60429+ pax_open_kernel();
60430 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
60431+ pax_close_kernel();
60432 }
60433
60434 static void jump_label_update(struct jump_label_key *key, int enable);
60435@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
60436 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
60437 struct jump_entry *iter;
60438
60439+ pax_open_kernel();
60440 for (iter = iter_start; iter < iter_stop; iter++) {
60441 if (within_module_init(iter->code, mod))
60442 iter->code = 0;
60443 }
60444+ pax_close_kernel();
60445 }
60446
60447 static int
60448diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
60449--- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
60450+++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
60451@@ -11,6 +11,9 @@
60452 * Changed the compression method from stem compression to "table lookup"
60453 * compression (see scripts/kallsyms.c for a more complete description)
60454 */
60455+#ifdef CONFIG_GRKERNSEC_HIDESYM
60456+#define __INCLUDED_BY_HIDESYM 1
60457+#endif
60458 #include <linux/kallsyms.h>
60459 #include <linux/module.h>
60460 #include <linux/init.h>
60461@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
60462
60463 static inline int is_kernel_inittext(unsigned long addr)
60464 {
60465+ if (system_state != SYSTEM_BOOTING)
60466+ return 0;
60467+
60468 if (addr >= (unsigned long)_sinittext
60469 && addr <= (unsigned long)_einittext)
60470 return 1;
60471 return 0;
60472 }
60473
60474+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60475+#ifdef CONFIG_MODULES
60476+static inline int is_module_text(unsigned long addr)
60477+{
60478+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
60479+ return 1;
60480+
60481+ addr = ktla_ktva(addr);
60482+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
60483+}
60484+#else
60485+static inline int is_module_text(unsigned long addr)
60486+{
60487+ return 0;
60488+}
60489+#endif
60490+#endif
60491+
60492 static inline int is_kernel_text(unsigned long addr)
60493 {
60494 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
60495@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
60496
60497 static inline int is_kernel(unsigned long addr)
60498 {
60499+
60500+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60501+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
60502+ return 1;
60503+
60504+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
60505+#else
60506 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
60507+#endif
60508+
60509 return 1;
60510 return in_gate_area_no_mm(addr);
60511 }
60512
60513 static int is_ksym_addr(unsigned long addr)
60514 {
60515+
60516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60517+ if (is_module_text(addr))
60518+ return 0;
60519+#endif
60520+
60521 if (all_var)
60522 return is_kernel(addr);
60523
60524@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
60525
60526 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
60527 {
60528- iter->name[0] = '\0';
60529 iter->nameoff = get_symbol_offset(new_pos);
60530 iter->pos = new_pos;
60531 }
60532@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
60533 {
60534 struct kallsym_iter *iter = m->private;
60535
60536+#ifdef CONFIG_GRKERNSEC_HIDESYM
60537+ if (current_uid())
60538+ return 0;
60539+#endif
60540+
60541 /* Some debugging symbols have no name. Ignore them. */
60542 if (!iter->name[0])
60543 return 0;
60544@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
60545 struct kallsym_iter *iter;
60546 int ret;
60547
60548- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60549+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60550 if (!iter)
60551 return -ENOMEM;
60552 reset_iter(iter, 0);
60553diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
60554--- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
60555+++ linux-3.0.4/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
60556@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60557 * If module auto-loading support is disabled then this function
60558 * becomes a no-operation.
60559 */
60560-int __request_module(bool wait, const char *fmt, ...)
60561+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60562 {
60563- va_list args;
60564 char module_name[MODULE_NAME_LEN];
60565 unsigned int max_modprobes;
60566 int ret;
60567- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60568+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60569 static char *envp[] = { "HOME=/",
60570 "TERM=linux",
60571 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60572@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
60573 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
60574 static int kmod_loop_msg;
60575
60576- va_start(args, fmt);
60577- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60578- va_end(args);
60579+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60580 if (ret >= MODULE_NAME_LEN)
60581 return -ENAMETOOLONG;
60582
60583@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
60584 if (ret)
60585 return ret;
60586
60587+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60588+ if (!current_uid()) {
60589+ /* hack to workaround consolekit/udisks stupidity */
60590+ read_lock(&tasklist_lock);
60591+ if (!strcmp(current->comm, "mount") &&
60592+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60593+ read_unlock(&tasklist_lock);
60594+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60595+ return -EPERM;
60596+ }
60597+ read_unlock(&tasklist_lock);
60598+ }
60599+#endif
60600+
60601 /* If modprobe needs a service that is in a module, we get a recursive
60602 * loop. Limit the number of running kmod threads to max_threads/2 or
60603 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60604@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
60605 atomic_dec(&kmod_concurrent);
60606 return ret;
60607 }
60608+
60609+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60610+{
60611+ va_list args;
60612+ int ret;
60613+
60614+ va_start(args, fmt);
60615+ ret = ____request_module(wait, module_param, fmt, args);
60616+ va_end(args);
60617+
60618+ return ret;
60619+}
60620+
60621+int __request_module(bool wait, const char *fmt, ...)
60622+{
60623+ va_list args;
60624+ int ret;
60625+
60626+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60627+ if (current_uid()) {
60628+ char module_param[MODULE_NAME_LEN];
60629+
60630+ memset(module_param, 0, sizeof(module_param));
60631+
60632+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60633+
60634+ va_start(args, fmt);
60635+ ret = ____request_module(wait, module_param, fmt, args);
60636+ va_end(args);
60637+
60638+ return ret;
60639+ }
60640+#endif
60641+
60642+ va_start(args, fmt);
60643+ ret = ____request_module(wait, NULL, fmt, args);
60644+ va_end(args);
60645+
60646+ return ret;
60647+}
60648+
60649 EXPORT_SYMBOL(__request_module);
60650 #endif /* CONFIG_MODULES */
60651
60652diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
60653--- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
60654+++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
60655@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
60656 * kernel image and loaded module images reside. This is required
60657 * so x86_64 can correctly handle the %rip-relative fixups.
60658 */
60659- kip->insns = module_alloc(PAGE_SIZE);
60660+ kip->insns = module_alloc_exec(PAGE_SIZE);
60661 if (!kip->insns) {
60662 kfree(kip);
60663 return NULL;
60664@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
60665 */
60666 if (!list_is_singular(&kip->list)) {
60667 list_del(&kip->list);
60668- module_free(NULL, kip->insns);
60669+ module_free_exec(NULL, kip->insns);
60670 kfree(kip);
60671 }
60672 return 1;
60673@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
60674 {
60675 int i, err = 0;
60676 unsigned long offset = 0, size = 0;
60677- char *modname, namebuf[128];
60678+ char *modname, namebuf[KSYM_NAME_LEN];
60679 const char *symbol_name;
60680 void *addr;
60681 struct kprobe_blackpoint *kb;
60682@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
60683 const char *sym = NULL;
60684 unsigned int i = *(loff_t *) v;
60685 unsigned long offset = 0;
60686- char *modname, namebuf[128];
60687+ char *modname, namebuf[KSYM_NAME_LEN];
60688
60689 head = &kprobe_table[i];
60690 preempt_disable();
60691diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
60692--- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
60693+++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
60694@@ -583,6 +583,10 @@ static int static_obj(void *obj)
60695 end = (unsigned long) &_end,
60696 addr = (unsigned long) obj;
60697
60698+#ifdef CONFIG_PAX_KERNEXEC
60699+ start = ktla_ktva(start);
60700+#endif
60701+
60702 /*
60703 * static variable?
60704 */
60705@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
60706 if (!static_obj(lock->key)) {
60707 debug_locks_off();
60708 printk("INFO: trying to register non-static key.\n");
60709+ printk("lock:%pS key:%pS.\n", lock, lock->key);
60710 printk("the code is fine but needs lockdep annotation.\n");
60711 printk("turning off the locking correctness validator.\n");
60712 dump_stack();
60713@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
60714 if (!class)
60715 return 0;
60716 }
60717- atomic_inc((atomic_t *)&class->ops);
60718+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
60719 if (very_verbose(class)) {
60720 printk("\nacquire class [%p] %s", class->key, class->name);
60721 if (class->name_version > 1)
60722diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
60723--- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
60724+++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
60725@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60726
60727 static void print_name(struct seq_file *m, struct lock_class *class)
60728 {
60729- char str[128];
60730+ char str[KSYM_NAME_LEN];
60731 const char *name = class->name;
60732
60733 if (!name) {
60734diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
60735--- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
60736+++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
60737@@ -58,6 +58,7 @@
60738 #include <linux/jump_label.h>
60739 #include <linux/pfn.h>
60740 #include <linux/bsearch.h>
60741+#include <linux/grsecurity.h>
60742
60743 #define CREATE_TRACE_POINTS
60744 #include <trace/events/module.h>
60745@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
60746
60747 /* Bounds of module allocation, for speeding __module_address.
60748 * Protected by module_mutex. */
60749-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60750+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60751+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60752
60753 int register_module_notifier(struct notifier_block * nb)
60754 {
60755@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
60756 return true;
60757
60758 list_for_each_entry_rcu(mod, &modules, list) {
60759- struct symsearch arr[] = {
60760+ struct symsearch modarr[] = {
60761 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60762 NOT_GPL_ONLY, false },
60763 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60764@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
60765 #endif
60766 };
60767
60768- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60769+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60770 return true;
60771 }
60772 return false;
60773@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
60774 static int percpu_modalloc(struct module *mod,
60775 unsigned long size, unsigned long align)
60776 {
60777- if (align > PAGE_SIZE) {
60778+ if (align-1 >= PAGE_SIZE) {
60779 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60780 mod->name, align, PAGE_SIZE);
60781 align = PAGE_SIZE;
60782@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
60783 */
60784 #ifdef CONFIG_SYSFS
60785
60786-#ifdef CONFIG_KALLSYMS
60787+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60788 static inline bool sect_empty(const Elf_Shdr *sect)
60789 {
60790 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
60791@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
60792
60793 static void unset_module_core_ro_nx(struct module *mod)
60794 {
60795- set_page_attributes(mod->module_core + mod->core_text_size,
60796- mod->module_core + mod->core_size,
60797+ set_page_attributes(mod->module_core_rw,
60798+ mod->module_core_rw + mod->core_size_rw,
60799 set_memory_x);
60800- set_page_attributes(mod->module_core,
60801- mod->module_core + mod->core_ro_size,
60802+ set_page_attributes(mod->module_core_rx,
60803+ mod->module_core_rx + mod->core_size_rx,
60804 set_memory_rw);
60805 }
60806
60807 static void unset_module_init_ro_nx(struct module *mod)
60808 {
60809- set_page_attributes(mod->module_init + mod->init_text_size,
60810- mod->module_init + mod->init_size,
60811+ set_page_attributes(mod->module_init_rw,
60812+ mod->module_init_rw + mod->init_size_rw,
60813 set_memory_x);
60814- set_page_attributes(mod->module_init,
60815- mod->module_init + mod->init_ro_size,
60816+ set_page_attributes(mod->module_init_rx,
60817+ mod->module_init_rx + mod->init_size_rx,
60818 set_memory_rw);
60819 }
60820
60821@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
60822
60823 mutex_lock(&module_mutex);
60824 list_for_each_entry_rcu(mod, &modules, list) {
60825- if ((mod->module_core) && (mod->core_text_size)) {
60826- set_page_attributes(mod->module_core,
60827- mod->module_core + mod->core_text_size,
60828+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
60829+ set_page_attributes(mod->module_core_rx,
60830+ mod->module_core_rx + mod->core_size_rx,
60831 set_memory_rw);
60832 }
60833- if ((mod->module_init) && (mod->init_text_size)) {
60834- set_page_attributes(mod->module_init,
60835- mod->module_init + mod->init_text_size,
60836+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
60837+ set_page_attributes(mod->module_init_rx,
60838+ mod->module_init_rx + mod->init_size_rx,
60839 set_memory_rw);
60840 }
60841 }
60842@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
60843
60844 mutex_lock(&module_mutex);
60845 list_for_each_entry_rcu(mod, &modules, list) {
60846- if ((mod->module_core) && (mod->core_text_size)) {
60847- set_page_attributes(mod->module_core,
60848- mod->module_core + mod->core_text_size,
60849+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
60850+ set_page_attributes(mod->module_core_rx,
60851+ mod->module_core_rx + mod->core_size_rx,
60852 set_memory_ro);
60853 }
60854- if ((mod->module_init) && (mod->init_text_size)) {
60855- set_page_attributes(mod->module_init,
60856- mod->module_init + mod->init_text_size,
60857+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
60858+ set_page_attributes(mod->module_init_rx,
60859+ mod->module_init_rx + mod->init_size_rx,
60860 set_memory_ro);
60861 }
60862 }
60863@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
60864
60865 /* This may be NULL, but that's OK */
60866 unset_module_init_ro_nx(mod);
60867- module_free(mod, mod->module_init);
60868+ module_free(mod, mod->module_init_rw);
60869+ module_free_exec(mod, mod->module_init_rx);
60870 kfree(mod->args);
60871 percpu_modfree(mod);
60872
60873 /* Free lock-classes: */
60874- lockdep_free_key_range(mod->module_core, mod->core_size);
60875+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60876+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60877
60878 /* Finally, free the core (containing the module structure) */
60879 unset_module_core_ro_nx(mod);
60880- module_free(mod, mod->module_core);
60881+ module_free_exec(mod, mod->module_core_rx);
60882+ module_free(mod, mod->module_core_rw);
60883
60884 #ifdef CONFIG_MPU
60885 update_protections(current->mm);
60886@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
60887 unsigned int i;
60888 int ret = 0;
60889 const struct kernel_symbol *ksym;
60890+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60891+ int is_fs_load = 0;
60892+ int register_filesystem_found = 0;
60893+ char *p;
60894+
60895+ p = strstr(mod->args, "grsec_modharden_fs");
60896+ if (p) {
60897+ char *endptr = p + strlen("grsec_modharden_fs");
60898+ /* copy \0 as well */
60899+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60900+ is_fs_load = 1;
60901+ }
60902+#endif
60903
60904 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
60905 const char *name = info->strtab + sym[i].st_name;
60906
60907+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60908+ /* it's a real shame this will never get ripped and copied
60909+ upstream! ;(
60910+ */
60911+ if (is_fs_load && !strcmp(name, "register_filesystem"))
60912+ register_filesystem_found = 1;
60913+#endif
60914+
60915 switch (sym[i].st_shndx) {
60916 case SHN_COMMON:
60917 /* We compiled with -fno-common. These are not
60918@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
60919 ksym = resolve_symbol_wait(mod, info, name);
60920 /* Ok if resolved. */
60921 if (ksym && !IS_ERR(ksym)) {
60922+ pax_open_kernel();
60923 sym[i].st_value = ksym->value;
60924+ pax_close_kernel();
60925 break;
60926 }
60927
60928@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
60929 secbase = (unsigned long)mod_percpu(mod);
60930 else
60931 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
60932+ pax_open_kernel();
60933 sym[i].st_value += secbase;
60934+ pax_close_kernel();
60935 break;
60936 }
60937 }
60938
60939+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60940+ if (is_fs_load && !register_filesystem_found) {
60941+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60942+ ret = -EPERM;
60943+ }
60944+#endif
60945+
60946 return ret;
60947 }
60948
60949@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
60950 || s->sh_entsize != ~0UL
60951 || strstarts(sname, ".init"))
60952 continue;
60953- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60954+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60955+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60956+ else
60957+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60958 DEBUGP("\t%s\n", name);
60959 }
60960- switch (m) {
60961- case 0: /* executable */
60962- mod->core_size = debug_align(mod->core_size);
60963- mod->core_text_size = mod->core_size;
60964- break;
60965- case 1: /* RO: text and ro-data */
60966- mod->core_size = debug_align(mod->core_size);
60967- mod->core_ro_size = mod->core_size;
60968- break;
60969- case 3: /* whole core */
60970- mod->core_size = debug_align(mod->core_size);
60971- break;
60972- }
60973 }
60974
60975 DEBUGP("Init section allocation order:\n");
60976@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
60977 || s->sh_entsize != ~0UL
60978 || !strstarts(sname, ".init"))
60979 continue;
60980- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60981- | INIT_OFFSET_MASK);
60982+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60983+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60984+ else
60985+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60986+ s->sh_entsize |= INIT_OFFSET_MASK;
60987 DEBUGP("\t%s\n", sname);
60988 }
60989- switch (m) {
60990- case 0: /* executable */
60991- mod->init_size = debug_align(mod->init_size);
60992- mod->init_text_size = mod->init_size;
60993- break;
60994- case 1: /* RO: text and ro-data */
60995- mod->init_size = debug_align(mod->init_size);
60996- mod->init_ro_size = mod->init_size;
60997- break;
60998- case 3: /* whole init */
60999- mod->init_size = debug_align(mod->init_size);
61000- break;
61001- }
61002 }
61003 }
61004
61005@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
61006
61007 /* Put symbol section at end of init part of module. */
61008 symsect->sh_flags |= SHF_ALLOC;
61009- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
61010+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
61011 info->index.sym) | INIT_OFFSET_MASK;
61012 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
61013
61014@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
61015 }
61016
61017 /* Append room for core symbols at end of core part. */
61018- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
61019- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
61020+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
61021+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
61022
61023 /* Put string table section at end of init part of module. */
61024 strsect->sh_flags |= SHF_ALLOC;
61025- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
61026+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
61027 info->index.str) | INIT_OFFSET_MASK;
61028 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
61029
61030 /* Append room for core symbols' strings at end of core part. */
61031- info->stroffs = mod->core_size;
61032+ info->stroffs = mod->core_size_rx;
61033 __set_bit(0, info->strmap);
61034- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
61035+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
61036 }
61037
61038 static void add_kallsyms(struct module *mod, const struct load_info *info)
61039@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
61040 /* Make sure we get permanent strtab: don't use info->strtab. */
61041 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
61042
61043+ pax_open_kernel();
61044+
61045 /* Set types up while we still have access to sections. */
61046 for (i = 0; i < mod->num_symtab; i++)
61047 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
61048
61049- mod->core_symtab = dst = mod->module_core + info->symoffs;
61050+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
61051 src = mod->symtab;
61052 *dst = *src;
61053 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
61054@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
61055 }
61056 mod->core_num_syms = ndst;
61057
61058- mod->core_strtab = s = mod->module_core + info->stroffs;
61059+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
61060 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
61061 if (test_bit(i, info->strmap))
61062 *++s = mod->strtab[i];
61063+
61064+ pax_close_kernel();
61065 }
61066 #else
61067 static inline void layout_symtab(struct module *mod, struct load_info *info)
61068@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
61069 ddebug_remove_module(debug->modname);
61070 }
61071
61072-static void *module_alloc_update_bounds(unsigned long size)
61073+static void *module_alloc_update_bounds_rw(unsigned long size)
61074 {
61075 void *ret = module_alloc(size);
61076
61077 if (ret) {
61078 mutex_lock(&module_mutex);
61079 /* Update module bounds. */
61080- if ((unsigned long)ret < module_addr_min)
61081- module_addr_min = (unsigned long)ret;
61082- if ((unsigned long)ret + size > module_addr_max)
61083- module_addr_max = (unsigned long)ret + size;
61084+ if ((unsigned long)ret < module_addr_min_rw)
61085+ module_addr_min_rw = (unsigned long)ret;
61086+ if ((unsigned long)ret + size > module_addr_max_rw)
61087+ module_addr_max_rw = (unsigned long)ret + size;
61088+ mutex_unlock(&module_mutex);
61089+ }
61090+ return ret;
61091+}
61092+
61093+static void *module_alloc_update_bounds_rx(unsigned long size)
61094+{
61095+ void *ret = module_alloc_exec(size);
61096+
61097+ if (ret) {
61098+ mutex_lock(&module_mutex);
61099+ /* Update module bounds. */
61100+ if ((unsigned long)ret < module_addr_min_rx)
61101+ module_addr_min_rx = (unsigned long)ret;
61102+ if ((unsigned long)ret + size > module_addr_max_rx)
61103+ module_addr_max_rx = (unsigned long)ret + size;
61104 mutex_unlock(&module_mutex);
61105 }
61106 return ret;
61107@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
61108 void *ptr;
61109
61110 /* Do the allocs. */
61111- ptr = module_alloc_update_bounds(mod->core_size);
61112+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
61113 /*
61114 * The pointer to this block is stored in the module structure
61115 * which is inside the block. Just mark it as not being a
61116@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
61117 if (!ptr)
61118 return -ENOMEM;
61119
61120- memset(ptr, 0, mod->core_size);
61121- mod->module_core = ptr;
61122+ memset(ptr, 0, mod->core_size_rw);
61123+ mod->module_core_rw = ptr;
61124
61125- ptr = module_alloc_update_bounds(mod->init_size);
61126+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
61127 /*
61128 * The pointer to this block is stored in the module structure
61129 * which is inside the block. This block doesn't need to be
61130 * scanned as it contains data and code that will be freed
61131 * after the module is initialized.
61132 */
61133- kmemleak_ignore(ptr);
61134- if (!ptr && mod->init_size) {
61135- module_free(mod, mod->module_core);
61136+ kmemleak_not_leak(ptr);
61137+ if (!ptr && mod->init_size_rw) {
61138+ module_free(mod, mod->module_core_rw);
61139 return -ENOMEM;
61140 }
61141- memset(ptr, 0, mod->init_size);
61142- mod->module_init = ptr;
61143+ memset(ptr, 0, mod->init_size_rw);
61144+ mod->module_init_rw = ptr;
61145+
61146+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
61147+ kmemleak_not_leak(ptr);
61148+ if (!ptr) {
61149+ module_free(mod, mod->module_init_rw);
61150+ module_free(mod, mod->module_core_rw);
61151+ return -ENOMEM;
61152+ }
61153+
61154+ pax_open_kernel();
61155+ memset(ptr, 0, mod->core_size_rx);
61156+ pax_close_kernel();
61157+ mod->module_core_rx = ptr;
61158+
61159+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
61160+ kmemleak_not_leak(ptr);
61161+ if (!ptr && mod->init_size_rx) {
61162+ module_free_exec(mod, mod->module_core_rx);
61163+ module_free(mod, mod->module_init_rw);
61164+ module_free(mod, mod->module_core_rw);
61165+ return -ENOMEM;
61166+ }
61167+
61168+ pax_open_kernel();
61169+ memset(ptr, 0, mod->init_size_rx);
61170+ pax_close_kernel();
61171+ mod->module_init_rx = ptr;
61172
61173 /* Transfer each section which specifies SHF_ALLOC */
61174 DEBUGP("final section addresses:\n");
61175@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
61176 if (!(shdr->sh_flags & SHF_ALLOC))
61177 continue;
61178
61179- if (shdr->sh_entsize & INIT_OFFSET_MASK)
61180- dest = mod->module_init
61181- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
61182- else
61183- dest = mod->module_core + shdr->sh_entsize;
61184+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
61185+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
61186+ dest = mod->module_init_rw
61187+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
61188+ else
61189+ dest = mod->module_init_rx
61190+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
61191+ } else {
61192+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
61193+ dest = mod->module_core_rw + shdr->sh_entsize;
61194+ else
61195+ dest = mod->module_core_rx + shdr->sh_entsize;
61196+ }
61197+
61198+ if (shdr->sh_type != SHT_NOBITS) {
61199+
61200+#ifdef CONFIG_PAX_KERNEXEC
61201+#ifdef CONFIG_X86_64
61202+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
61203+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
61204+#endif
61205+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
61206+ pax_open_kernel();
61207+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
61208+ pax_close_kernel();
61209+ } else
61210+#endif
61211
61212- if (shdr->sh_type != SHT_NOBITS)
61213 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
61214+ }
61215 /* Update sh_addr to point to copy in image. */
61216- shdr->sh_addr = (unsigned long)dest;
61217+
61218+#ifdef CONFIG_PAX_KERNEXEC
61219+ if (shdr->sh_flags & SHF_EXECINSTR)
61220+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
61221+ else
61222+#endif
61223+
61224+ shdr->sh_addr = (unsigned long)dest;
61225 DEBUGP("\t0x%lx %s\n",
61226 shdr->sh_addr, info->secstrings + shdr->sh_name);
61227 }
61228@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
61229 * Do it before processing of module parameters, so the module
61230 * can provide parameter accessor functions of its own.
61231 */
61232- if (mod->module_init)
61233- flush_icache_range((unsigned long)mod->module_init,
61234- (unsigned long)mod->module_init
61235- + mod->init_size);
61236- flush_icache_range((unsigned long)mod->module_core,
61237- (unsigned long)mod->module_core + mod->core_size);
61238+ if (mod->module_init_rx)
61239+ flush_icache_range((unsigned long)mod->module_init_rx,
61240+ (unsigned long)mod->module_init_rx
61241+ + mod->init_size_rx);
61242+ flush_icache_range((unsigned long)mod->module_core_rx,
61243+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
61244
61245 set_fs(old_fs);
61246 }
61247@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
61248 {
61249 kfree(info->strmap);
61250 percpu_modfree(mod);
61251- module_free(mod, mod->module_init);
61252- module_free(mod, mod->module_core);
61253+ module_free_exec(mod, mod->module_init_rx);
61254+ module_free_exec(mod, mod->module_core_rx);
61255+ module_free(mod, mod->module_init_rw);
61256+ module_free(mod, mod->module_core_rw);
61257 }
61258
61259 static int post_relocation(struct module *mod, const struct load_info *info)
61260@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
61261 if (err)
61262 goto free_unload;
61263
61264+ /* Now copy in args */
61265+ mod->args = strndup_user(uargs, ~0UL >> 1);
61266+ if (IS_ERR(mod->args)) {
61267+ err = PTR_ERR(mod->args);
61268+ goto free_unload;
61269+ }
61270+
61271 /* Set up MODINFO_ATTR fields */
61272 setup_modinfo(mod, &info);
61273
61274+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61275+ {
61276+ char *p, *p2;
61277+
61278+ if (strstr(mod->args, "grsec_modharden_netdev")) {
61279+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
61280+ err = -EPERM;
61281+ goto free_modinfo;
61282+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
61283+ p += strlen("grsec_modharden_normal");
61284+ p2 = strstr(p, "_");
61285+ if (p2) {
61286+ *p2 = '\0';
61287+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
61288+ *p2 = '_';
61289+ }
61290+ err = -EPERM;
61291+ goto free_modinfo;
61292+ }
61293+ }
61294+#endif
61295+
61296 /* Fix up syms, so that st_value is a pointer to location. */
61297 err = simplify_symbols(mod, &info);
61298 if (err < 0)
61299@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
61300
61301 flush_module_icache(mod);
61302
61303- /* Now copy in args */
61304- mod->args = strndup_user(uargs, ~0UL >> 1);
61305- if (IS_ERR(mod->args)) {
61306- err = PTR_ERR(mod->args);
61307- goto free_arch_cleanup;
61308- }
61309-
61310 /* Mark state as coming so strong_try_module_get() ignores us. */
61311 mod->state = MODULE_STATE_COMING;
61312
61313@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
61314 unlock:
61315 mutex_unlock(&module_mutex);
61316 synchronize_sched();
61317- kfree(mod->args);
61318- free_arch_cleanup:
61319 module_arch_cleanup(mod);
61320 free_modinfo:
61321 free_modinfo(mod);
61322+ kfree(mod->args);
61323 free_unload:
61324 module_unload_free(mod);
61325 free_module:
61326@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
61327 MODULE_STATE_COMING, mod);
61328
61329 /* Set RO and NX regions for core */
61330- set_section_ro_nx(mod->module_core,
61331- mod->core_text_size,
61332- mod->core_ro_size,
61333- mod->core_size);
61334+ set_section_ro_nx(mod->module_core_rx,
61335+ mod->core_size_rx,
61336+ mod->core_size_rx,
61337+ mod->core_size_rx);
61338
61339 /* Set RO and NX regions for init */
61340- set_section_ro_nx(mod->module_init,
61341- mod->init_text_size,
61342- mod->init_ro_size,
61343- mod->init_size);
61344+ set_section_ro_nx(mod->module_init_rx,
61345+ mod->init_size_rx,
61346+ mod->init_size_rx,
61347+ mod->init_size_rx);
61348
61349 do_mod_ctors(mod);
61350 /* Start the module */
61351@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
61352 mod->strtab = mod->core_strtab;
61353 #endif
61354 unset_module_init_ro_nx(mod);
61355- module_free(mod, mod->module_init);
61356- mod->module_init = NULL;
61357- mod->init_size = 0;
61358- mod->init_ro_size = 0;
61359- mod->init_text_size = 0;
61360+ module_free(mod, mod->module_init_rw);
61361+ module_free_exec(mod, mod->module_init_rx);
61362+ mod->module_init_rw = NULL;
61363+ mod->module_init_rx = NULL;
61364+ mod->init_size_rw = 0;
61365+ mod->init_size_rx = 0;
61366 mutex_unlock(&module_mutex);
61367
61368 return 0;
61369@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
61370 unsigned long nextval;
61371
61372 /* At worse, next value is at end of module */
61373- if (within_module_init(addr, mod))
61374- nextval = (unsigned long)mod->module_init+mod->init_text_size;
61375+ if (within_module_init_rx(addr, mod))
61376+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
61377+ else if (within_module_init_rw(addr, mod))
61378+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
61379+ else if (within_module_core_rx(addr, mod))
61380+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
61381+ else if (within_module_core_rw(addr, mod))
61382+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
61383 else
61384- nextval = (unsigned long)mod->module_core+mod->core_text_size;
61385+ return NULL;
61386
61387 /* Scan for closest preceding symbol, and next symbol. (ELF
61388 starts real symbols at 1). */
61389@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
61390 char buf[8];
61391
61392 seq_printf(m, "%s %u",
61393- mod->name, mod->init_size + mod->core_size);
61394+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
61395 print_unload_info(m, mod);
61396
61397 /* Informative for users. */
61398@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
61399 mod->state == MODULE_STATE_COMING ? "Loading":
61400 "Live");
61401 /* Used by oprofile and other similar tools. */
61402- seq_printf(m, " 0x%pK", mod->module_core);
61403+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
61404
61405 /* Taints info */
61406 if (mod->taints)
61407@@ -3283,7 +3406,17 @@ static const struct file_operations proc
61408
61409 static int __init proc_modules_init(void)
61410 {
61411+#ifndef CONFIG_GRKERNSEC_HIDESYM
61412+#ifdef CONFIG_GRKERNSEC_PROC_USER
61413+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61414+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61415+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
61416+#else
61417 proc_create("modules", 0, NULL, &proc_modules_operations);
61418+#endif
61419+#else
61420+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61421+#endif
61422 return 0;
61423 }
61424 module_init(proc_modules_init);
61425@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
61426 {
61427 struct module *mod;
61428
61429- if (addr < module_addr_min || addr > module_addr_max)
61430+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
61431+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
61432 return NULL;
61433
61434 list_for_each_entry_rcu(mod, &modules, list)
61435- if (within_module_core(addr, mod)
61436- || within_module_init(addr, mod))
61437+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
61438 return mod;
61439 return NULL;
61440 }
61441@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
61442 */
61443 struct module *__module_text_address(unsigned long addr)
61444 {
61445- struct module *mod = __module_address(addr);
61446+ struct module *mod;
61447+
61448+#ifdef CONFIG_X86_32
61449+ addr = ktla_ktva(addr);
61450+#endif
61451+
61452+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
61453+ return NULL;
61454+
61455+ mod = __module_address(addr);
61456+
61457 if (mod) {
61458 /* Make sure it's within the text section. */
61459- if (!within(addr, mod->module_init, mod->init_text_size)
61460- && !within(addr, mod->module_core, mod->core_text_size))
61461+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
61462 mod = NULL;
61463 }
61464 return mod;
61465diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
61466--- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
61467+++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
61468@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
61469 spin_lock_mutex(&lock->wait_lock, flags);
61470
61471 debug_mutex_lock_common(lock, &waiter);
61472- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
61473+ debug_mutex_add_waiter(lock, &waiter, task);
61474
61475 /* add waiting tasks to the end of the waitqueue (FIFO): */
61476 list_add_tail(&waiter.list, &lock->wait_list);
61477@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
61478 * TASK_UNINTERRUPTIBLE case.)
61479 */
61480 if (unlikely(signal_pending_state(state, task))) {
61481- mutex_remove_waiter(lock, &waiter,
61482- task_thread_info(task));
61483+ mutex_remove_waiter(lock, &waiter, task);
61484 mutex_release(&lock->dep_map, 1, ip);
61485 spin_unlock_mutex(&lock->wait_lock, flags);
61486
61487@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
61488 done:
61489 lock_acquired(&lock->dep_map, ip);
61490 /* got the lock - rejoice! */
61491- mutex_remove_waiter(lock, &waiter, current_thread_info());
61492+ mutex_remove_waiter(lock, &waiter, task);
61493 mutex_set_owner(lock);
61494
61495 /* set it to 0 if there are no waiters left: */
61496diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
61497--- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
61498+++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
61499@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
61500 }
61501
61502 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61503- struct thread_info *ti)
61504+ struct task_struct *task)
61505 {
61506 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
61507
61508 /* Mark the current thread as blocked on the lock: */
61509- ti->task->blocked_on = waiter;
61510+ task->blocked_on = waiter;
61511 }
61512
61513 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61514- struct thread_info *ti)
61515+ struct task_struct *task)
61516 {
61517 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
61518- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
61519- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
61520- ti->task->blocked_on = NULL;
61521+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
61522+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
61523+ task->blocked_on = NULL;
61524
61525 list_del_init(&waiter->list);
61526 waiter->task = NULL;
61527diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
61528--- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
61529+++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
61530@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
61531 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
61532 extern void debug_mutex_add_waiter(struct mutex *lock,
61533 struct mutex_waiter *waiter,
61534- struct thread_info *ti);
61535+ struct task_struct *task);
61536 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61537- struct thread_info *ti);
61538+ struct task_struct *task);
61539 extern void debug_mutex_unlock(struct mutex *lock);
61540 extern void debug_mutex_init(struct mutex *lock, const char *name,
61541 struct lock_class_key *key);
61542diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
61543--- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
61544+++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
61545@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
61546 padata->pd = pd;
61547 padata->cb_cpu = cb_cpu;
61548
61549- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
61550- atomic_set(&pd->seq_nr, -1);
61551+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
61552+ atomic_set_unchecked(&pd->seq_nr, -1);
61553
61554- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
61555+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
61556
61557 target_cpu = padata_cpu_hash(padata);
61558 queue = per_cpu_ptr(pd->pqueue, target_cpu);
61559@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
61560 padata_init_pqueues(pd);
61561 padata_init_squeues(pd);
61562 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
61563- atomic_set(&pd->seq_nr, -1);
61564+ atomic_set_unchecked(&pd->seq_nr, -1);
61565 atomic_set(&pd->reorder_objects, 0);
61566 atomic_set(&pd->refcnt, 0);
61567 pd->pinst = pinst;
61568diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
61569--- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
61570+++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
61571@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
61572 const char *board;
61573
61574 printk(KERN_WARNING "------------[ cut here ]------------\n");
61575- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61576+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61577 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61578 if (board)
61579 printk(KERN_WARNING "Hardware name: %s\n", board);
61580@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61581 */
61582 void __stack_chk_fail(void)
61583 {
61584- panic("stack-protector: Kernel stack is corrupted in: %p\n",
61585+ dump_stack();
61586+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61587 __builtin_return_address(0));
61588 }
61589 EXPORT_SYMBOL(__stack_chk_fail);
61590diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
61591--- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
61592+++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
61593@@ -33,6 +33,7 @@
61594 #include <linux/rculist.h>
61595 #include <linux/bootmem.h>
61596 #include <linux/hash.h>
61597+#include <linux/security.h>
61598 #include <linux/pid_namespace.h>
61599 #include <linux/init_task.h>
61600 #include <linux/syscalls.h>
61601@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61602
61603 int pid_max = PID_MAX_DEFAULT;
61604
61605-#define RESERVED_PIDS 300
61606+#define RESERVED_PIDS 500
61607
61608 int pid_max_min = RESERVED_PIDS + 1;
61609 int pid_max_max = PID_MAX_LIMIT;
61610@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
61611 */
61612 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61613 {
61614+ struct task_struct *task;
61615+
61616 rcu_lockdep_assert(rcu_read_lock_held());
61617- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61618+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61619+
61620+ if (gr_pid_is_chrooted(task))
61621+ return NULL;
61622+
61623+ return task;
61624 }
61625
61626 struct task_struct *find_task_by_vpid(pid_t vnr)
61627@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
61628 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61629 }
61630
61631+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61632+{
61633+ rcu_lockdep_assert(rcu_read_lock_held());
61634+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61635+}
61636+
61637 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61638 {
61639 struct pid *pid;
61640diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
61641--- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
61642+++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
61643@@ -6,6 +6,7 @@
61644 #include <linux/posix-timers.h>
61645 #include <linux/errno.h>
61646 #include <linux/math64.h>
61647+#include <linux/security.h>
61648 #include <asm/uaccess.h>
61649 #include <linux/kernel_stat.h>
61650 #include <trace/events/timer.h>
61651@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
61652
61653 static __init int init_posix_cpu_timers(void)
61654 {
61655- struct k_clock process = {
61656+ static struct k_clock process = {
61657 .clock_getres = process_cpu_clock_getres,
61658 .clock_get = process_cpu_clock_get,
61659 .timer_create = process_cpu_timer_create,
61660 .nsleep = process_cpu_nsleep,
61661 .nsleep_restart = process_cpu_nsleep_restart,
61662 };
61663- struct k_clock thread = {
61664+ static struct k_clock thread = {
61665 .clock_getres = thread_cpu_clock_getres,
61666 .clock_get = thread_cpu_clock_get,
61667 .timer_create = thread_cpu_timer_create,
61668diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
61669--- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
61670+++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
61671@@ -43,6 +43,7 @@
61672 #include <linux/idr.h>
61673 #include <linux/posix-clock.h>
61674 #include <linux/posix-timers.h>
61675+#include <linux/grsecurity.h>
61676 #include <linux/syscalls.h>
61677 #include <linux/wait.h>
61678 #include <linux/workqueue.h>
61679@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
61680 * which we beg off on and pass to do_sys_settimeofday().
61681 */
61682
61683-static struct k_clock posix_clocks[MAX_CLOCKS];
61684+static struct k_clock *posix_clocks[MAX_CLOCKS];
61685
61686 /*
61687 * These ones are defined below.
61688@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
61689 */
61690 static __init int init_posix_timers(void)
61691 {
61692- struct k_clock clock_realtime = {
61693+ static struct k_clock clock_realtime = {
61694 .clock_getres = hrtimer_get_res,
61695 .clock_get = posix_clock_realtime_get,
61696 .clock_set = posix_clock_realtime_set,
61697@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
61698 .timer_get = common_timer_get,
61699 .timer_del = common_timer_del,
61700 };
61701- struct k_clock clock_monotonic = {
61702+ static struct k_clock clock_monotonic = {
61703 .clock_getres = hrtimer_get_res,
61704 .clock_get = posix_ktime_get_ts,
61705 .nsleep = common_nsleep,
61706@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
61707 .timer_get = common_timer_get,
61708 .timer_del = common_timer_del,
61709 };
61710- struct k_clock clock_monotonic_raw = {
61711+ static struct k_clock clock_monotonic_raw = {
61712 .clock_getres = hrtimer_get_res,
61713 .clock_get = posix_get_monotonic_raw,
61714 };
61715- struct k_clock clock_realtime_coarse = {
61716+ static struct k_clock clock_realtime_coarse = {
61717 .clock_getres = posix_get_coarse_res,
61718 .clock_get = posix_get_realtime_coarse,
61719 };
61720- struct k_clock clock_monotonic_coarse = {
61721+ static struct k_clock clock_monotonic_coarse = {
61722 .clock_getres = posix_get_coarse_res,
61723 .clock_get = posix_get_monotonic_coarse,
61724 };
61725- struct k_clock clock_boottime = {
61726+ static struct k_clock clock_boottime = {
61727 .clock_getres = hrtimer_get_res,
61728 .clock_get = posix_get_boottime,
61729 .nsleep = common_nsleep,
61730@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
61731 .timer_del = common_timer_del,
61732 };
61733
61734+ pax_track_stack();
61735+
61736 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
61737 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
61738 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61739@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
61740 return;
61741 }
61742
61743- posix_clocks[clock_id] = *new_clock;
61744+ posix_clocks[clock_id] = new_clock;
61745 }
61746 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
61747
61748@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
61749 return (id & CLOCKFD_MASK) == CLOCKFD ?
61750 &clock_posix_dynamic : &clock_posix_cpu;
61751
61752- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
61753+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
61754 return NULL;
61755- return &posix_clocks[id];
61756+ return posix_clocks[id];
61757 }
61758
61759 static int common_timer_create(struct k_itimer *new_timer)
61760@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61761 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61762 return -EFAULT;
61763
61764+ /* only the CLOCK_REALTIME clock can be set, all other clocks
61765+ have their clock_set fptr set to a nosettime dummy function
61766+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61767+ call common_clock_set, which calls do_sys_settimeofday, which
61768+ we hook
61769+ */
61770+
61771 return kc->clock_set(which_clock, &new_tp);
61772 }
61773
61774diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
61775--- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
61776+++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
61777@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61778 .enable_mask = SYSRQ_ENABLE_BOOT,
61779 };
61780
61781-static int pm_sysrq_init(void)
61782+static int __init pm_sysrq_init(void)
61783 {
61784 register_sysrq_key('o', &sysrq_poweroff_op);
61785 return 0;
61786diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
61787--- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
61788+++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
61789@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
61790 u64 elapsed_csecs64;
61791 unsigned int elapsed_csecs;
61792 bool wakeup = false;
61793+ bool timedout = false;
61794
61795 do_gettimeofday(&start);
61796
61797@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
61798
61799 while (true) {
61800 todo = 0;
61801+ if (time_after(jiffies, end_time))
61802+ timedout = true;
61803 read_lock(&tasklist_lock);
61804 do_each_thread(g, p) {
61805 if (frozen(p) || !freezable(p))
61806@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
61807 * try_to_stop() after schedule() in ptrace/signal
61808 * stop sees TIF_FREEZE.
61809 */
61810- if (!task_is_stopped_or_traced(p) &&
61811- !freezer_should_skip(p))
61812+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61813 todo++;
61814+ if (timedout) {
61815+ printk(KERN_ERR "Task refusing to freeze:\n");
61816+ sched_show_task(p);
61817+ }
61818+ }
61819 } while_each_thread(g, p);
61820 read_unlock(&tasklist_lock);
61821
61822@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
61823 todo += wq_busy;
61824 }
61825
61826- if (!todo || time_after(jiffies, end_time))
61827+ if (!todo || timedout)
61828 break;
61829
61830 if (pm_wakeup_pending()) {
61831diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
61832--- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
61833+++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
61834@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
61835 if (from_file && type != SYSLOG_ACTION_OPEN)
61836 return 0;
61837
61838+#ifdef CONFIG_GRKERNSEC_DMESG
61839+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
61840+ return -EPERM;
61841+#endif
61842+
61843 if (syslog_action_restricted(type)) {
61844 if (capable(CAP_SYSLOG))
61845 return 0;
61846 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
61847 if (capable(CAP_SYS_ADMIN)) {
61848- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
61849+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
61850 "but no CAP_SYSLOG (deprecated).\n");
61851 return 0;
61852 }
61853diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
61854--- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
61855+++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
61856@@ -39,7 +39,7 @@ struct profile_hit {
61857 /* Oprofile timer tick hook */
61858 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61859
61860-static atomic_t *prof_buffer;
61861+static atomic_unchecked_t *prof_buffer;
61862 static unsigned long prof_len, prof_shift;
61863
61864 int prof_on __read_mostly;
61865@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
61866 hits[i].pc = 0;
61867 continue;
61868 }
61869- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61870+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61871 hits[i].hits = hits[i].pc = 0;
61872 }
61873 }
61874@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
61875 * Add the current hit(s) and flush the write-queue out
61876 * to the global buffer:
61877 */
61878- atomic_add(nr_hits, &prof_buffer[pc]);
61879+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61880 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61881- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61882+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61883 hits[i].pc = hits[i].hits = 0;
61884 }
61885 out:
61886@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
61887 {
61888 unsigned long pc;
61889 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61890- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61891+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61892 }
61893 #endif /* !CONFIG_SMP */
61894
61895@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61896 return -EFAULT;
61897 buf++; p++; count--; read++;
61898 }
61899- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61900+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61901 if (copy_to_user(buf, (void *)pnt, count))
61902 return -EFAULT;
61903 read += count;
61904@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61905 }
61906 #endif
61907 profile_discard_flip_buffers();
61908- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61909+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61910 return count;
61911 }
61912
61913diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
61914--- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
61915+++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
61916@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
61917 return ret;
61918 }
61919
61920-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61921+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61922+ unsigned int log)
61923 {
61924 const struct cred *cred = current_cred(), *tcred;
61925
61926@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
61927 cred->gid == tcred->sgid &&
61928 cred->gid == tcred->gid))
61929 goto ok;
61930- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
61931+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
61932+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
61933 goto ok;
61934 rcu_read_unlock();
61935 return -EPERM;
61936@@ -167,7 +169,9 @@ ok:
61937 smp_rmb();
61938 if (task->mm)
61939 dumpable = get_dumpable(task->mm);
61940- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
61941+ if (!dumpable &&
61942+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
61943+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
61944 return -EPERM;
61945
61946 return security_ptrace_access_check(task, mode);
61947@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
61948 {
61949 int err;
61950 task_lock(task);
61951- err = __ptrace_may_access(task, mode);
61952+ err = __ptrace_may_access(task, mode, 0);
61953+ task_unlock(task);
61954+ return !err;
61955+}
61956+
61957+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61958+{
61959+ int err;
61960+ task_lock(task);
61961+ err = __ptrace_may_access(task, mode, 1);
61962 task_unlock(task);
61963 return !err;
61964 }
61965@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
61966 goto out;
61967
61968 task_lock(task);
61969- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61970+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61971 task_unlock(task);
61972 if (retval)
61973 goto unlock_creds;
61974@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
61975 goto unlock_tasklist;
61976
61977 task->ptrace = PT_PTRACED;
61978- if (task_ns_capable(task, CAP_SYS_PTRACE))
61979+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
61980 task->ptrace |= PT_PTRACE_CAP;
61981
61982 __ptrace_link(task, current);
61983@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
61984 {
61985 int copied = 0;
61986
61987+ pax_track_stack();
61988+
61989 while (len > 0) {
61990 char buf[128];
61991 int this_len, retval;
61992@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
61993 break;
61994 return -EIO;
61995 }
61996- if (copy_to_user(dst, buf, retval))
61997+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
61998 return -EFAULT;
61999 copied += retval;
62000 src += retval;
62001@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
62002 {
62003 int copied = 0;
62004
62005+ pax_track_stack();
62006+
62007 while (len > 0) {
62008 char buf[128];
62009 int this_len, retval;
62010@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
62011 {
62012 int ret = -EIO;
62013 siginfo_t siginfo;
62014- void __user *datavp = (void __user *) data;
62015+ void __user *datavp = (__force void __user *) data;
62016 unsigned long __user *datalp = datavp;
62017
62018+ pax_track_stack();
62019+
62020 switch (request) {
62021 case PTRACE_PEEKTEXT:
62022 case PTRACE_PEEKDATA:
62023@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
62024 goto out;
62025 }
62026
62027+ if (gr_handle_ptrace(child, request)) {
62028+ ret = -EPERM;
62029+ goto out_put_task_struct;
62030+ }
62031+
62032 if (request == PTRACE_ATTACH) {
62033 ret = ptrace_attach(child);
62034 /*
62035 * Some architectures need to do book-keeping after
62036 * a ptrace attach.
62037 */
62038- if (!ret)
62039+ if (!ret) {
62040 arch_ptrace_attach(child);
62041+ gr_audit_ptrace(child);
62042+ }
62043 goto out_put_task_struct;
62044 }
62045
62046@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
62047 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
62048 if (copied != sizeof(tmp))
62049 return -EIO;
62050- return put_user(tmp, (unsigned long __user *)data);
62051+ return put_user(tmp, (__force unsigned long __user *)data);
62052 }
62053
62054 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
62055@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
62056 siginfo_t siginfo;
62057 int ret;
62058
62059+ pax_track_stack();
62060+
62061 switch (request) {
62062 case PTRACE_PEEKTEXT:
62063 case PTRACE_PEEKDATA:
62064@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
62065 goto out;
62066 }
62067
62068+ if (gr_handle_ptrace(child, request)) {
62069+ ret = -EPERM;
62070+ goto out_put_task_struct;
62071+ }
62072+
62073 if (request == PTRACE_ATTACH) {
62074 ret = ptrace_attach(child);
62075 /*
62076 * Some architectures need to do book-keeping after
62077 * a ptrace attach.
62078 */
62079- if (!ret)
62080+ if (!ret) {
62081 arch_ptrace_attach(child);
62082+ gr_audit_ptrace(child);
62083+ }
62084 goto out_put_task_struct;
62085 }
62086
62087diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
62088--- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
62089+++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
62090@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
62091 { 0 };
62092 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
62093 { 0 };
62094-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62095-static atomic_t n_rcu_torture_alloc;
62096-static atomic_t n_rcu_torture_alloc_fail;
62097-static atomic_t n_rcu_torture_free;
62098-static atomic_t n_rcu_torture_mberror;
62099-static atomic_t n_rcu_torture_error;
62100+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62101+static atomic_unchecked_t n_rcu_torture_alloc;
62102+static atomic_unchecked_t n_rcu_torture_alloc_fail;
62103+static atomic_unchecked_t n_rcu_torture_free;
62104+static atomic_unchecked_t n_rcu_torture_mberror;
62105+static atomic_unchecked_t n_rcu_torture_error;
62106 static long n_rcu_torture_boost_ktrerror;
62107 static long n_rcu_torture_boost_rterror;
62108 static long n_rcu_torture_boost_failure;
62109@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
62110
62111 spin_lock_bh(&rcu_torture_lock);
62112 if (list_empty(&rcu_torture_freelist)) {
62113- atomic_inc(&n_rcu_torture_alloc_fail);
62114+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
62115 spin_unlock_bh(&rcu_torture_lock);
62116 return NULL;
62117 }
62118- atomic_inc(&n_rcu_torture_alloc);
62119+ atomic_inc_unchecked(&n_rcu_torture_alloc);
62120 p = rcu_torture_freelist.next;
62121 list_del_init(p);
62122 spin_unlock_bh(&rcu_torture_lock);
62123@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
62124 static void
62125 rcu_torture_free(struct rcu_torture *p)
62126 {
62127- atomic_inc(&n_rcu_torture_free);
62128+ atomic_inc_unchecked(&n_rcu_torture_free);
62129 spin_lock_bh(&rcu_torture_lock);
62130 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
62131 spin_unlock_bh(&rcu_torture_lock);
62132@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
62133 i = rp->rtort_pipe_count;
62134 if (i > RCU_TORTURE_PIPE_LEN)
62135 i = RCU_TORTURE_PIPE_LEN;
62136- atomic_inc(&rcu_torture_wcount[i]);
62137+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62138 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62139 rp->rtort_mbtest = 0;
62140 rcu_torture_free(rp);
62141@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
62142 i = rp->rtort_pipe_count;
62143 if (i > RCU_TORTURE_PIPE_LEN)
62144 i = RCU_TORTURE_PIPE_LEN;
62145- atomic_inc(&rcu_torture_wcount[i]);
62146+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62147 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62148 rp->rtort_mbtest = 0;
62149 list_del(&rp->rtort_free);
62150@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
62151 i = old_rp->rtort_pipe_count;
62152 if (i > RCU_TORTURE_PIPE_LEN)
62153 i = RCU_TORTURE_PIPE_LEN;
62154- atomic_inc(&rcu_torture_wcount[i]);
62155+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62156 old_rp->rtort_pipe_count++;
62157 cur_ops->deferred_free(old_rp);
62158 }
62159@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
62160 return;
62161 }
62162 if (p->rtort_mbtest == 0)
62163- atomic_inc(&n_rcu_torture_mberror);
62164+ atomic_inc_unchecked(&n_rcu_torture_mberror);
62165 spin_lock(&rand_lock);
62166 cur_ops->read_delay(&rand);
62167 n_rcu_torture_timers++;
62168@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
62169 continue;
62170 }
62171 if (p->rtort_mbtest == 0)
62172- atomic_inc(&n_rcu_torture_mberror);
62173+ atomic_inc_unchecked(&n_rcu_torture_mberror);
62174 cur_ops->read_delay(&rand);
62175 preempt_disable();
62176 pipe_count = p->rtort_pipe_count;
62177@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
62178 rcu_torture_current,
62179 rcu_torture_current_version,
62180 list_empty(&rcu_torture_freelist),
62181- atomic_read(&n_rcu_torture_alloc),
62182- atomic_read(&n_rcu_torture_alloc_fail),
62183- atomic_read(&n_rcu_torture_free),
62184- atomic_read(&n_rcu_torture_mberror),
62185+ atomic_read_unchecked(&n_rcu_torture_alloc),
62186+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
62187+ atomic_read_unchecked(&n_rcu_torture_free),
62188+ atomic_read_unchecked(&n_rcu_torture_mberror),
62189 n_rcu_torture_boost_ktrerror,
62190 n_rcu_torture_boost_rterror,
62191 n_rcu_torture_boost_failure,
62192 n_rcu_torture_boosts,
62193 n_rcu_torture_timers);
62194- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
62195+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
62196 n_rcu_torture_boost_ktrerror != 0 ||
62197 n_rcu_torture_boost_rterror != 0 ||
62198 n_rcu_torture_boost_failure != 0)
62199@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
62200 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
62201 if (i > 1) {
62202 cnt += sprintf(&page[cnt], "!!! ");
62203- atomic_inc(&n_rcu_torture_error);
62204+ atomic_inc_unchecked(&n_rcu_torture_error);
62205 WARN_ON_ONCE(1);
62206 }
62207 cnt += sprintf(&page[cnt], "Reader Pipe: ");
62208@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
62209 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
62210 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62211 cnt += sprintf(&page[cnt], " %d",
62212- atomic_read(&rcu_torture_wcount[i]));
62213+ atomic_read_unchecked(&rcu_torture_wcount[i]));
62214 }
62215 cnt += sprintf(&page[cnt], "\n");
62216 if (cur_ops->stats)
62217@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
62218
62219 if (cur_ops->cleanup)
62220 cur_ops->cleanup();
62221- if (atomic_read(&n_rcu_torture_error))
62222+ if (atomic_read_unchecked(&n_rcu_torture_error))
62223 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
62224 else
62225 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
62226@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
62227
62228 rcu_torture_current = NULL;
62229 rcu_torture_current_version = 0;
62230- atomic_set(&n_rcu_torture_alloc, 0);
62231- atomic_set(&n_rcu_torture_alloc_fail, 0);
62232- atomic_set(&n_rcu_torture_free, 0);
62233- atomic_set(&n_rcu_torture_mberror, 0);
62234- atomic_set(&n_rcu_torture_error, 0);
62235+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
62236+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
62237+ atomic_set_unchecked(&n_rcu_torture_free, 0);
62238+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
62239+ atomic_set_unchecked(&n_rcu_torture_error, 0);
62240 n_rcu_torture_boost_ktrerror = 0;
62241 n_rcu_torture_boost_rterror = 0;
62242 n_rcu_torture_boost_failure = 0;
62243 n_rcu_torture_boosts = 0;
62244 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
62245- atomic_set(&rcu_torture_wcount[i], 0);
62246+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
62247 for_each_possible_cpu(cpu) {
62248 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62249 per_cpu(rcu_torture_count, cpu)[i] = 0;
62250diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
62251--- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
62252+++ linux-3.0.4/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
62253@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
62254 }
62255 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
62256 smp_mb__before_atomic_inc(); /* See above. */
62257- atomic_inc(&rdtp->dynticks);
62258+ atomic_inc_unchecked(&rdtp->dynticks);
62259 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
62260- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
62261+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
62262 local_irq_restore(flags);
62263
62264 /* If the interrupt queued a callback, get out of dyntick mode. */
62265@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
62266 return;
62267 }
62268 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
62269- atomic_inc(&rdtp->dynticks);
62270+ atomic_inc_unchecked(&rdtp->dynticks);
62271 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
62272 smp_mb__after_atomic_inc(); /* See above. */
62273- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
62274+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
62275 local_irq_restore(flags);
62276 }
62277
62278@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
62279 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
62280
62281 if (rdtp->dynticks_nmi_nesting == 0 &&
62282- (atomic_read(&rdtp->dynticks) & 0x1))
62283+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
62284 return;
62285 rdtp->dynticks_nmi_nesting++;
62286 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
62287- atomic_inc(&rdtp->dynticks);
62288+ atomic_inc_unchecked(&rdtp->dynticks);
62289 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
62290 smp_mb__after_atomic_inc(); /* See above. */
62291- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
62292+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
62293 }
62294
62295 /**
62296@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
62297 return;
62298 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
62299 smp_mb__before_atomic_inc(); /* See above. */
62300- atomic_inc(&rdtp->dynticks);
62301+ atomic_inc_unchecked(&rdtp->dynticks);
62302 smp_mb__after_atomic_inc(); /* Force delay to next write. */
62303- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
62304+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
62305 }
62306
62307 /**
62308@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
62309 */
62310 static int dyntick_save_progress_counter(struct rcu_data *rdp)
62311 {
62312- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
62313+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
62314 return 0;
62315 }
62316
62317@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
62318 unsigned long curr;
62319 unsigned long snap;
62320
62321- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
62322+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
62323 snap = (unsigned long)rdp->dynticks_snap;
62324
62325 /*
62326@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
62327 /*
62328 * Do softirq processing for the current CPU.
62329 */
62330-static void rcu_process_callbacks(struct softirq_action *unused)
62331+static void rcu_process_callbacks(void)
62332 {
62333 __rcu_process_callbacks(&rcu_sched_state,
62334 &__get_cpu_var(rcu_sched_data));
62335diff -urNp linux-3.0.4/kernel/rcutree.h linux-3.0.4/kernel/rcutree.h
62336--- linux-3.0.4/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
62337+++ linux-3.0.4/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
62338@@ -86,7 +86,7 @@
62339 struct rcu_dynticks {
62340 int dynticks_nesting; /* Track irq/process nesting level. */
62341 int dynticks_nmi_nesting; /* Track NMI nesting level. */
62342- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
62343+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
62344 };
62345
62346 /* RCU's kthread states for tracing. */
62347diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
62348--- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
62349+++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
62350@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
62351
62352 /* Clean up and exit. */
62353 smp_mb(); /* ensure expedited GP seen before counter increment. */
62354- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
62355+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
62356 unlock_mb_ret:
62357 mutex_unlock(&sync_rcu_preempt_exp_mutex);
62358 mb_ret:
62359@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
62360
62361 #else /* #ifndef CONFIG_SMP */
62362
62363-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
62364-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
62365+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
62366+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
62367
62368 static int synchronize_sched_expedited_cpu_stop(void *data)
62369 {
62370@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
62371 int firstsnap, s, snap, trycount = 0;
62372
62373 /* Note that atomic_inc_return() implies full memory barrier. */
62374- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
62375+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
62376 get_online_cpus();
62377
62378 /*
62379@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
62380 }
62381
62382 /* Check to see if someone else did our work for us. */
62383- s = atomic_read(&sync_sched_expedited_done);
62384+ s = atomic_read_unchecked(&sync_sched_expedited_done);
62385 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
62386 smp_mb(); /* ensure test happens before caller kfree */
62387 return;
62388@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
62389 * grace period works for us.
62390 */
62391 get_online_cpus();
62392- snap = atomic_read(&sync_sched_expedited_started) - 1;
62393+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
62394 smp_mb(); /* ensure read is before try_stop_cpus(). */
62395 }
62396
62397@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
62398 * than we did beat us to the punch.
62399 */
62400 do {
62401- s = atomic_read(&sync_sched_expedited_done);
62402+ s = atomic_read_unchecked(&sync_sched_expedited_done);
62403 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
62404 smp_mb(); /* ensure test happens before caller kfree */
62405 break;
62406 }
62407- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
62408+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
62409
62410 put_online_cpus();
62411 }
62412diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
62413--- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
62414+++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
62415@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
62416 };
62417 ssize_t ret;
62418
62419+ pax_track_stack();
62420+
62421 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
62422 return 0;
62423 if (splice_grow_spd(pipe, &spd))
62424diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
62425--- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
62426+++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
62427@@ -141,8 +141,18 @@ static const struct file_operations proc
62428
62429 static int __init ioresources_init(void)
62430 {
62431+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62432+#ifdef CONFIG_GRKERNSEC_PROC_USER
62433+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
62434+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
62435+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62436+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
62437+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
62438+#endif
62439+#else
62440 proc_create("ioports", 0, NULL, &proc_ioports_operations);
62441 proc_create("iomem", 0, NULL, &proc_iomem_operations);
62442+#endif
62443 return 0;
62444 }
62445 __initcall(ioresources_init);
62446diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
62447--- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
62448+++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
62449@@ -20,7 +20,7 @@
62450 #define MAX_RT_TEST_MUTEXES 8
62451
62452 static spinlock_t rttest_lock;
62453-static atomic_t rttest_event;
62454+static atomic_unchecked_t rttest_event;
62455
62456 struct test_thread_data {
62457 int opcode;
62458@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
62459
62460 case RTTEST_LOCKCONT:
62461 td->mutexes[td->opdata] = 1;
62462- td->event = atomic_add_return(1, &rttest_event);
62463+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62464 return 0;
62465
62466 case RTTEST_RESET:
62467@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
62468 return 0;
62469
62470 case RTTEST_RESETEVENT:
62471- atomic_set(&rttest_event, 0);
62472+ atomic_set_unchecked(&rttest_event, 0);
62473 return 0;
62474
62475 default:
62476@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
62477 return ret;
62478
62479 td->mutexes[id] = 1;
62480- td->event = atomic_add_return(1, &rttest_event);
62481+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62482 rt_mutex_lock(&mutexes[id]);
62483- td->event = atomic_add_return(1, &rttest_event);
62484+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62485 td->mutexes[id] = 4;
62486 return 0;
62487
62488@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
62489 return ret;
62490
62491 td->mutexes[id] = 1;
62492- td->event = atomic_add_return(1, &rttest_event);
62493+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62494 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
62495- td->event = atomic_add_return(1, &rttest_event);
62496+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62497 td->mutexes[id] = ret ? 0 : 4;
62498 return ret ? -EINTR : 0;
62499
62500@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
62501 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
62502 return ret;
62503
62504- td->event = atomic_add_return(1, &rttest_event);
62505+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62506 rt_mutex_unlock(&mutexes[id]);
62507- td->event = atomic_add_return(1, &rttest_event);
62508+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62509 td->mutexes[id] = 0;
62510 return 0;
62511
62512@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
62513 break;
62514
62515 td->mutexes[dat] = 2;
62516- td->event = atomic_add_return(1, &rttest_event);
62517+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62518 break;
62519
62520 default:
62521@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
62522 return;
62523
62524 td->mutexes[dat] = 3;
62525- td->event = atomic_add_return(1, &rttest_event);
62526+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62527 break;
62528
62529 case RTTEST_LOCKNOWAIT:
62530@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
62531 return;
62532
62533 td->mutexes[dat] = 1;
62534- td->event = atomic_add_return(1, &rttest_event);
62535+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62536 return;
62537
62538 default:
62539diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
62540--- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
62541+++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
62542@@ -7,7 +7,7 @@
62543
62544 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
62545 static struct autogroup autogroup_default;
62546-static atomic_t autogroup_seq_nr;
62547+static atomic_unchecked_t autogroup_seq_nr;
62548
62549 static void __init autogroup_init(struct task_struct *init_task)
62550 {
62551@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
62552
62553 kref_init(&ag->kref);
62554 init_rwsem(&ag->lock);
62555- ag->id = atomic_inc_return(&autogroup_seq_nr);
62556+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
62557 ag->tg = tg;
62558 #ifdef CONFIG_RT_GROUP_SCHED
62559 /*
62560diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
62561--- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
62562+++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
62563@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
62564 struct rq *rq;
62565 int cpu;
62566
62567+ pax_track_stack();
62568+
62569 need_resched:
62570 preempt_disable();
62571 cpu = smp_processor_id();
62572@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
62573 /* convert nice value [19,-20] to rlimit style value [1,40] */
62574 int nice_rlim = 20 - nice;
62575
62576+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62577+
62578 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
62579 capable(CAP_SYS_NICE));
62580 }
62581@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62582 if (nice > 19)
62583 nice = 19;
62584
62585- if (increment < 0 && !can_nice(current, nice))
62586+ if (increment < 0 && (!can_nice(current, nice) ||
62587+ gr_handle_chroot_nice()))
62588 return -EPERM;
62589
62590 retval = security_task_setnice(current, nice);
62591@@ -5111,6 +5116,7 @@ recheck:
62592 unsigned long rlim_rtprio =
62593 task_rlimit(p, RLIMIT_RTPRIO);
62594
62595+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
62596 /* can't set/change the rt policy */
62597 if (policy != p->policy && !rlim_rtprio)
62598 return -EPERM;
62599diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
62600--- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
62601+++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
62602@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
62603 * run_rebalance_domains is triggered when needed from the scheduler tick.
62604 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
62605 */
62606-static void run_rebalance_domains(struct softirq_action *h)
62607+static void run_rebalance_domains(void)
62608 {
62609 int this_cpu = smp_processor_id();
62610 struct rq *this_rq = cpu_rq(this_cpu);
62611diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
62612--- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
62613+++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
62614@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
62615
62616 int print_fatal_signals __read_mostly;
62617
62618-static void __user *sig_handler(struct task_struct *t, int sig)
62619+static __sighandler_t sig_handler(struct task_struct *t, int sig)
62620 {
62621 return t->sighand->action[sig - 1].sa.sa_handler;
62622 }
62623
62624-static int sig_handler_ignored(void __user *handler, int sig)
62625+static int sig_handler_ignored(__sighandler_t handler, int sig)
62626 {
62627 /* Is it explicitly or implicitly ignored? */
62628 return handler == SIG_IGN ||
62629@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
62630 static int sig_task_ignored(struct task_struct *t, int sig,
62631 int from_ancestor_ns)
62632 {
62633- void __user *handler;
62634+ __sighandler_t handler;
62635
62636 handler = sig_handler(t, sig);
62637
62638@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
62639 atomic_inc(&user->sigpending);
62640 rcu_read_unlock();
62641
62642+ if (!override_rlimit)
62643+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62644+
62645 if (override_rlimit ||
62646 atomic_read(&user->sigpending) <=
62647 task_rlimit(t, RLIMIT_SIGPENDING)) {
62648@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
62649
62650 int unhandled_signal(struct task_struct *tsk, int sig)
62651 {
62652- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62653+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62654 if (is_global_init(tsk))
62655 return 1;
62656 if (handler != SIG_IGN && handler != SIG_DFL)
62657@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
62658 }
62659 }
62660
62661+ /* allow glibc communication via tgkill to other threads in our
62662+ thread group */
62663+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
62664+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
62665+ && gr_handle_signal(t, sig))
62666+ return -EPERM;
62667+
62668 return security_task_kill(t, info, sig, 0);
62669 }
62670
62671@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
62672 return send_signal(sig, info, p, 1);
62673 }
62674
62675-static int
62676+int
62677 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62678 {
62679 return send_signal(sig, info, t, 0);
62680@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
62681 unsigned long int flags;
62682 int ret, blocked, ignored;
62683 struct k_sigaction *action;
62684+ int is_unhandled = 0;
62685
62686 spin_lock_irqsave(&t->sighand->siglock, flags);
62687 action = &t->sighand->action[sig-1];
62688@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
62689 }
62690 if (action->sa.sa_handler == SIG_DFL)
62691 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62692+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62693+ is_unhandled = 1;
62694 ret = specific_send_sig_info(sig, info, t);
62695 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62696
62697+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
62698+ normal operation */
62699+ if (is_unhandled) {
62700+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62701+ gr_handle_crash(t, sig);
62702+ }
62703+
62704 return ret;
62705 }
62706
62707@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
62708 ret = check_kill_permission(sig, info, p);
62709 rcu_read_unlock();
62710
62711- if (!ret && sig)
62712+ if (!ret && sig) {
62713 ret = do_send_sig_info(sig, info, p, true);
62714+ if (!ret)
62715+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62716+ }
62717
62718 return ret;
62719 }
62720@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
62721 {
62722 siginfo_t info;
62723
62724+ pax_track_stack();
62725+
62726 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62727
62728 memset(&info, 0, sizeof info);
62729@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62730 int error = -ESRCH;
62731
62732 rcu_read_lock();
62733- p = find_task_by_vpid(pid);
62734+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62735+ /* allow glibc communication via tgkill to other threads in our
62736+ thread group */
62737+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62738+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
62739+ p = find_task_by_vpid_unrestricted(pid);
62740+ else
62741+#endif
62742+ p = find_task_by_vpid(pid);
62743 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62744 error = check_kill_permission(sig, info, p);
62745 /*
62746diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
62747--- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
62748+++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
62749@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
62750 }
62751 EXPORT_SYMBOL(smp_call_function);
62752
62753-void ipi_call_lock(void)
62754+void ipi_call_lock(void) __acquires(call_function.lock)
62755 {
62756 raw_spin_lock(&call_function.lock);
62757 }
62758
62759-void ipi_call_unlock(void)
62760+void ipi_call_unlock(void) __releases(call_function.lock)
62761 {
62762 raw_spin_unlock(&call_function.lock);
62763 }
62764
62765-void ipi_call_lock_irq(void)
62766+void ipi_call_lock_irq(void) __acquires(call_function.lock)
62767 {
62768 raw_spin_lock_irq(&call_function.lock);
62769 }
62770
62771-void ipi_call_unlock_irq(void)
62772+void ipi_call_unlock_irq(void) __releases(call_function.lock)
62773 {
62774 raw_spin_unlock_irq(&call_function.lock);
62775 }
62776diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
62777--- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
62778+++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
62779@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62780
62781 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62782
62783-char *softirq_to_name[NR_SOFTIRQS] = {
62784+const char * const softirq_to_name[NR_SOFTIRQS] = {
62785 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62786 "TASKLET", "SCHED", "HRTIMER", "RCU"
62787 };
62788@@ -235,7 +235,7 @@ restart:
62789 kstat_incr_softirqs_this_cpu(vec_nr);
62790
62791 trace_softirq_entry(vec_nr);
62792- h->action(h);
62793+ h->action();
62794 trace_softirq_exit(vec_nr);
62795 if (unlikely(prev_count != preempt_count())) {
62796 printk(KERN_ERR "huh, entered softirq %u %s %p"
62797@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
62798 local_irq_restore(flags);
62799 }
62800
62801-void open_softirq(int nr, void (*action)(struct softirq_action *))
62802+void open_softirq(int nr, void (*action)(void))
62803 {
62804- softirq_vec[nr].action = action;
62805+ pax_open_kernel();
62806+ *(void **)&softirq_vec[nr].action = action;
62807+ pax_close_kernel();
62808 }
62809
62810 /*
62811@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
62812
62813 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62814
62815-static void tasklet_action(struct softirq_action *a)
62816+static void tasklet_action(void)
62817 {
62818 struct tasklet_struct *list;
62819
62820@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
62821 }
62822 }
62823
62824-static void tasklet_hi_action(struct softirq_action *a)
62825+static void tasklet_hi_action(void)
62826 {
62827 struct tasklet_struct *list;
62828
62829diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
62830--- linux-3.0.4/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
62831+++ linux-3.0.4/kernel/sys.c 2011-08-29 23:26:27.000000000 -0400
62832@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
62833 error = -EACCES;
62834 goto out;
62835 }
62836+
62837+ if (gr_handle_chroot_setpriority(p, niceval)) {
62838+ error = -EACCES;
62839+ goto out;
62840+ }
62841+
62842 no_nice = security_task_setnice(p, niceval);
62843 if (no_nice) {
62844 error = no_nice;
62845@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62846 goto error;
62847 }
62848
62849+ if (gr_check_group_change(new->gid, new->egid, -1))
62850+ goto error;
62851+
62852 if (rgid != (gid_t) -1 ||
62853 (egid != (gid_t) -1 && egid != old->gid))
62854 new->sgid = new->egid;
62855@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62856 old = current_cred();
62857
62858 retval = -EPERM;
62859+
62860+ if (gr_check_group_change(gid, gid, gid))
62861+ goto error;
62862+
62863 if (nsown_capable(CAP_SETGID))
62864 new->gid = new->egid = new->sgid = new->fsgid = gid;
62865 else if (gid == old->gid || gid == old->sgid)
62866@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
62867 if (!new_user)
62868 return -EAGAIN;
62869
62870+ /*
62871+ * We don't fail in case of NPROC limit excess here because too many
62872+ * poorly written programs don't check set*uid() return code, assuming
62873+ * it never fails if called by root. We may still enforce NPROC limit
62874+ * for programs doing set*uid()+execve() by harmlessly deferring the
62875+ * failure to the execve() stage.
62876+ */
62877 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
62878- new_user != INIT_USER) {
62879- free_uid(new_user);
62880- return -EAGAIN;
62881- }
62882+ new_user != INIT_USER)
62883+ current->flags |= PF_NPROC_EXCEEDED;
62884+ else
62885+ current->flags &= ~PF_NPROC_EXCEEDED;
62886
62887 free_uid(new->user);
62888 new->user = new_user;
62889@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62890 goto error;
62891 }
62892
62893+ if (gr_check_user_change(new->uid, new->euid, -1))
62894+ goto error;
62895+
62896 if (new->uid != old->uid) {
62897 retval = set_user(new);
62898 if (retval < 0)
62899@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62900 old = current_cred();
62901
62902 retval = -EPERM;
62903+
62904+ if (gr_check_crash_uid(uid))
62905+ goto error;
62906+ if (gr_check_user_change(uid, uid, uid))
62907+ goto error;
62908+
62909 if (nsown_capable(CAP_SETUID)) {
62910 new->suid = new->uid = uid;
62911 if (uid != old->uid) {
62912@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62913 goto error;
62914 }
62915
62916+ if (gr_check_user_change(ruid, euid, -1))
62917+ goto error;
62918+
62919 if (ruid != (uid_t) -1) {
62920 new->uid = ruid;
62921 if (ruid != old->uid) {
62922@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62923 goto error;
62924 }
62925
62926+ if (gr_check_group_change(rgid, egid, -1))
62927+ goto error;
62928+
62929 if (rgid != (gid_t) -1)
62930 new->gid = rgid;
62931 if (egid != (gid_t) -1)
62932@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62933 old = current_cred();
62934 old_fsuid = old->fsuid;
62935
62936+ if (gr_check_user_change(-1, -1, uid))
62937+ goto error;
62938+
62939 if (uid == old->uid || uid == old->euid ||
62940 uid == old->suid || uid == old->fsuid ||
62941 nsown_capable(CAP_SETUID)) {
62942@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62943 }
62944 }
62945
62946+error:
62947 abort_creds(new);
62948 return old_fsuid;
62949
62950@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62951 if (gid == old->gid || gid == old->egid ||
62952 gid == old->sgid || gid == old->fsgid ||
62953 nsown_capable(CAP_SETGID)) {
62954+ if (gr_check_group_change(-1, -1, gid))
62955+ goto error;
62956+
62957 if (gid != old_fsgid) {
62958 new->fsgid = gid;
62959 goto change_okay;
62960 }
62961 }
62962
62963+error:
62964 abort_creds(new);
62965 return old_fsgid;
62966
62967@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62968 error = get_dumpable(me->mm);
62969 break;
62970 case PR_SET_DUMPABLE:
62971- if (arg2 < 0 || arg2 > 1) {
62972+ if (arg2 > 1) {
62973 error = -EINVAL;
62974 break;
62975 }
62976diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
62977--- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
62978+++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
62979@@ -85,6 +85,13 @@
62980
62981
62982 #if defined(CONFIG_SYSCTL)
62983+#include <linux/grsecurity.h>
62984+#include <linux/grinternal.h>
62985+
62986+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62987+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62988+ const int op);
62989+extern int gr_handle_chroot_sysctl(const int op);
62990
62991 /* External variables not in a header file. */
62992 extern int sysctl_overcommit_memory;
62993@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
62994 }
62995
62996 #endif
62997+extern struct ctl_table grsecurity_table[];
62998
62999 static struct ctl_table root_table[];
63000 static struct ctl_table_root sysctl_table_root;
63001@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
63002 int sysctl_legacy_va_layout;
63003 #endif
63004
63005+#ifdef CONFIG_PAX_SOFTMODE
63006+static ctl_table pax_table[] = {
63007+ {
63008+ .procname = "softmode",
63009+ .data = &pax_softmode,
63010+ .maxlen = sizeof(unsigned int),
63011+ .mode = 0600,
63012+ .proc_handler = &proc_dointvec,
63013+ },
63014+
63015+ { }
63016+};
63017+#endif
63018+
63019 /* The default sysctl tables: */
63020
63021 static struct ctl_table root_table[] = {
63022@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
63023 #endif
63024
63025 static struct ctl_table kern_table[] = {
63026+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
63027+ {
63028+ .procname = "grsecurity",
63029+ .mode = 0500,
63030+ .child = grsecurity_table,
63031+ },
63032+#endif
63033+
63034+#ifdef CONFIG_PAX_SOFTMODE
63035+ {
63036+ .procname = "pax",
63037+ .mode = 0500,
63038+ .child = pax_table,
63039+ },
63040+#endif
63041+
63042 {
63043 .procname = "sched_child_runs_first",
63044 .data = &sysctl_sched_child_runs_first,
63045@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
63046 .data = &modprobe_path,
63047 .maxlen = KMOD_PATH_LEN,
63048 .mode = 0644,
63049- .proc_handler = proc_dostring,
63050+ .proc_handler = proc_dostring_modpriv,
63051 },
63052 {
63053 .procname = "modules_disabled",
63054@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
63055 .extra1 = &zero,
63056 .extra2 = &one,
63057 },
63058+#endif
63059 {
63060 .procname = "kptr_restrict",
63061 .data = &kptr_restrict,
63062 .maxlen = sizeof(int),
63063 .mode = 0644,
63064 .proc_handler = proc_dmesg_restrict,
63065+#ifdef CONFIG_GRKERNSEC_HIDESYM
63066+ .extra1 = &two,
63067+#else
63068 .extra1 = &zero,
63069+#endif
63070 .extra2 = &two,
63071 },
63072-#endif
63073 {
63074 .procname = "ngroups_max",
63075 .data = &ngroups_max,
63076@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
63077 .proc_handler = proc_dointvec_minmax,
63078 .extra1 = &zero,
63079 },
63080+ {
63081+ .procname = "heap_stack_gap",
63082+ .data = &sysctl_heap_stack_gap,
63083+ .maxlen = sizeof(sysctl_heap_stack_gap),
63084+ .mode = 0644,
63085+ .proc_handler = proc_doulongvec_minmax,
63086+ },
63087 #else
63088 {
63089 .procname = "nr_trim_pages",
63090@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
63091 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
63092 {
63093 int mode;
63094+ int error;
63095+
63096+ if (table->parent != NULL && table->parent->procname != NULL &&
63097+ table->procname != NULL &&
63098+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
63099+ return -EACCES;
63100+ if (gr_handle_chroot_sysctl(op))
63101+ return -EACCES;
63102+ error = gr_handle_sysctl(table, op);
63103+ if (error)
63104+ return error;
63105
63106 if (root->permissions)
63107 mode = root->permissions(root, current->nsproxy, table);
63108@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
63109 buffer, lenp, ppos);
63110 }
63111
63112+int proc_dostring_modpriv(struct ctl_table *table, int write,
63113+ void __user *buffer, size_t *lenp, loff_t *ppos)
63114+{
63115+ if (write && !capable(CAP_SYS_MODULE))
63116+ return -EPERM;
63117+
63118+ return _proc_do_string(table->data, table->maxlen, write,
63119+ buffer, lenp, ppos);
63120+}
63121+
63122 static size_t proc_skip_spaces(char **buf)
63123 {
63124 size_t ret;
63125@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
63126 len = strlen(tmp);
63127 if (len > *size)
63128 len = *size;
63129+ if (len > sizeof(tmp))
63130+ len = sizeof(tmp);
63131 if (copy_to_user(*buf, tmp, len))
63132 return -EFAULT;
63133 *size -= len;
63134@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
63135 *i = val;
63136 } else {
63137 val = convdiv * (*i) / convmul;
63138- if (!first)
63139+ if (!first) {
63140 err = proc_put_char(&buffer, &left, '\t');
63141+ if (err)
63142+ break;
63143+ }
63144 err = proc_put_long(&buffer, &left, val, false);
63145 if (err)
63146 break;
63147@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
63148 return -ENOSYS;
63149 }
63150
63151+int proc_dostring_modpriv(struct ctl_table *table, int write,
63152+ void __user *buffer, size_t *lenp, loff_t *ppos)
63153+{
63154+ return -ENOSYS;
63155+}
63156+
63157 int proc_dointvec(struct ctl_table *table, int write,
63158 void __user *buffer, size_t *lenp, loff_t *ppos)
63159 {
63160@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
63161 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
63162 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
63163 EXPORT_SYMBOL(proc_dostring);
63164+EXPORT_SYMBOL(proc_dostring_modpriv);
63165 EXPORT_SYMBOL(proc_doulongvec_minmax);
63166 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
63167 EXPORT_SYMBOL(register_sysctl_table);
63168diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
63169--- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
63170+++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
63171@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
63172 set_fail(&fail, table, "Directory with extra2");
63173 } else {
63174 if ((table->proc_handler == proc_dostring) ||
63175+ (table->proc_handler == proc_dostring_modpriv) ||
63176 (table->proc_handler == proc_dointvec) ||
63177 (table->proc_handler == proc_dointvec_minmax) ||
63178 (table->proc_handler == proc_dointvec_jiffies) ||
63179diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
63180--- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
63181+++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
63182@@ -27,9 +27,12 @@
63183 #include <linux/cgroup.h>
63184 #include <linux/fs.h>
63185 #include <linux/file.h>
63186+#include <linux/grsecurity.h>
63187 #include <net/genetlink.h>
63188 #include <asm/atomic.h>
63189
63190+extern int gr_is_taskstats_denied(int pid);
63191+
63192 /*
63193 * Maximum length of a cpumask that can be specified in
63194 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
63195@@ -558,6 +561,9 @@ err:
63196
63197 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
63198 {
63199+ if (gr_is_taskstats_denied(current->pid))
63200+ return -EACCES;
63201+
63202 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
63203 return cmd_attr_register_cpumask(info);
63204 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
63205diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
63206--- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
63207+++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
63208@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
63209 {
63210 int error = 0;
63211 int i;
63212- struct k_clock alarm_clock = {
63213+ static struct k_clock alarm_clock = {
63214 .clock_getres = alarm_clock_getres,
63215 .clock_get = alarm_clock_get,
63216 .timer_create = alarm_timer_create,
63217diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
63218--- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
63219+++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
63220@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
63221 * then clear the broadcast bit.
63222 */
63223 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
63224- int cpu = smp_processor_id();
63225+ cpu = smp_processor_id();
63226
63227 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
63228 tick_broadcast_clear_oneshot(cpu);
63229diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
63230--- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
63231+++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
63232@@ -14,6 +14,7 @@
63233 #include <linux/init.h>
63234 #include <linux/mm.h>
63235 #include <linux/sched.h>
63236+#include <linux/grsecurity.h>
63237 #include <linux/syscore_ops.h>
63238 #include <linux/clocksource.h>
63239 #include <linux/jiffies.h>
63240@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
63241 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
63242 return -EINVAL;
63243
63244+ gr_log_timechange();
63245+
63246 write_seqlock_irqsave(&xtime_lock, flags);
63247
63248 timekeeping_forward_now();
63249diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
63250--- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
63251+++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
63252@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
63253
63254 static void print_name_offset(struct seq_file *m, void *sym)
63255 {
63256+#ifdef CONFIG_GRKERNSEC_HIDESYM
63257+ SEQ_printf(m, "<%p>", NULL);
63258+#else
63259 char symname[KSYM_NAME_LEN];
63260
63261 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
63262 SEQ_printf(m, "<%pK>", sym);
63263 else
63264 SEQ_printf(m, "%s", symname);
63265+#endif
63266 }
63267
63268 static void
63269@@ -112,7 +116,11 @@ next_one:
63270 static void
63271 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
63272 {
63273+#ifdef CONFIG_GRKERNSEC_HIDESYM
63274+ SEQ_printf(m, " .base: %p\n", NULL);
63275+#else
63276 SEQ_printf(m, " .base: %pK\n", base);
63277+#endif
63278 SEQ_printf(m, " .index: %d\n",
63279 base->index);
63280 SEQ_printf(m, " .resolution: %Lu nsecs\n",
63281@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
63282 {
63283 struct proc_dir_entry *pe;
63284
63285+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63286+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
63287+#else
63288 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
63289+#endif
63290 if (!pe)
63291 return -ENOMEM;
63292 return 0;
63293diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
63294--- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
63295+++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
63296@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
63297 static unsigned long nr_entries;
63298 static struct entry entries[MAX_ENTRIES];
63299
63300-static atomic_t overflow_count;
63301+static atomic_unchecked_t overflow_count;
63302
63303 /*
63304 * The entries are in a hash-table, for fast lookup:
63305@@ -140,7 +140,7 @@ static void reset_entries(void)
63306 nr_entries = 0;
63307 memset(entries, 0, sizeof(entries));
63308 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
63309- atomic_set(&overflow_count, 0);
63310+ atomic_set_unchecked(&overflow_count, 0);
63311 }
63312
63313 static struct entry *alloc_entry(void)
63314@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
63315 if (likely(entry))
63316 entry->count++;
63317 else
63318- atomic_inc(&overflow_count);
63319+ atomic_inc_unchecked(&overflow_count);
63320
63321 out_unlock:
63322 raw_spin_unlock_irqrestore(lock, flags);
63323@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
63324
63325 static void print_name_offset(struct seq_file *m, unsigned long addr)
63326 {
63327+#ifdef CONFIG_GRKERNSEC_HIDESYM
63328+ seq_printf(m, "<%p>", NULL);
63329+#else
63330 char symname[KSYM_NAME_LEN];
63331
63332 if (lookup_symbol_name(addr, symname) < 0)
63333 seq_printf(m, "<%p>", (void *)addr);
63334 else
63335 seq_printf(m, "%s", symname);
63336+#endif
63337 }
63338
63339 static int tstats_show(struct seq_file *m, void *v)
63340@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
63341
63342 seq_puts(m, "Timer Stats Version: v0.2\n");
63343 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
63344- if (atomic_read(&overflow_count))
63345+ if (atomic_read_unchecked(&overflow_count))
63346 seq_printf(m, "Overflow: %d entries\n",
63347- atomic_read(&overflow_count));
63348+ atomic_read_unchecked(&overflow_count));
63349
63350 for (i = 0; i < nr_entries; i++) {
63351 entry = entries + i;
63352@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
63353 {
63354 struct proc_dir_entry *pe;
63355
63356+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63357+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
63358+#else
63359 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
63360+#endif
63361 if (!pe)
63362 return -ENOMEM;
63363 return 0;
63364diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
63365--- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
63366+++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
63367@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
63368 return error;
63369
63370 if (tz) {
63371+ /* we log in do_settimeofday called below, so don't log twice
63372+ */
63373+ if (!tv)
63374+ gr_log_timechange();
63375+
63376 /* SMP safe, global irq locking makes it work. */
63377 sys_tz = *tz;
63378 update_vsyscall_tz();
63379diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
63380--- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
63381+++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
63382@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
63383 /*
63384 * This function runs timers and the timer-tq in bottom half context.
63385 */
63386-static void run_timer_softirq(struct softirq_action *h)
63387+static void run_timer_softirq(void)
63388 {
63389 struct tvec_base *base = __this_cpu_read(tvec_bases);
63390
63391diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
63392--- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
63393+++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
63394@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
63395 struct blk_trace *bt = filp->private_data;
63396 char buf[16];
63397
63398- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
63399+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
63400
63401 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
63402 }
63403@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
63404 return 1;
63405
63406 bt = buf->chan->private_data;
63407- atomic_inc(&bt->dropped);
63408+ atomic_inc_unchecked(&bt->dropped);
63409 return 0;
63410 }
63411
63412@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
63413
63414 bt->dir = dir;
63415 bt->dev = dev;
63416- atomic_set(&bt->dropped, 0);
63417+ atomic_set_unchecked(&bt->dropped, 0);
63418
63419 ret = -EIO;
63420 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
63421diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
63422--- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
63423+++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
63424@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
63425 if (unlikely(ftrace_disabled))
63426 return 0;
63427
63428+ ret = ftrace_arch_code_modify_prepare();
63429+ FTRACE_WARN_ON(ret);
63430+ if (ret)
63431+ return 0;
63432+
63433 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
63434+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
63435 if (ret) {
63436 ftrace_bug(ret, ip);
63437- return 0;
63438 }
63439- return 1;
63440+ return ret ? 0 : 1;
63441 }
63442
63443 /*
63444@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
63445
63446 int
63447 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
63448- void *data)
63449+ void *data)
63450 {
63451 struct ftrace_func_probe *entry;
63452 struct ftrace_page *pg;
63453diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
63454--- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
63455+++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
63456@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
63457 size_t rem;
63458 unsigned int i;
63459
63460+ pax_track_stack();
63461+
63462 if (splice_grow_spd(pipe, &spd))
63463 return -ENOMEM;
63464
63465@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
63466 int entries, size, i;
63467 size_t ret;
63468
63469+ pax_track_stack();
63470+
63471 if (splice_grow_spd(pipe, &spd))
63472 return -ENOMEM;
63473
63474@@ -3990,10 +3994,9 @@ static const struct file_operations trac
63475 };
63476 #endif
63477
63478-static struct dentry *d_tracer;
63479-
63480 struct dentry *tracing_init_dentry(void)
63481 {
63482+ static struct dentry *d_tracer;
63483 static int once;
63484
63485 if (d_tracer)
63486@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
63487 return d_tracer;
63488 }
63489
63490-static struct dentry *d_percpu;
63491-
63492 struct dentry *tracing_dentry_percpu(void)
63493 {
63494+ static struct dentry *d_percpu;
63495 static int once;
63496 struct dentry *d_tracer;
63497
63498diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
63499--- linux-3.0.4/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
63500+++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
63501@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
63502 struct ftrace_module_file_ops {
63503 struct list_head list;
63504 struct module *mod;
63505- struct file_operations id;
63506- struct file_operations enable;
63507- struct file_operations format;
63508- struct file_operations filter;
63509 };
63510
63511 static struct ftrace_module_file_ops *
63512@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
63513
63514 file_ops->mod = mod;
63515
63516- file_ops->id = ftrace_event_id_fops;
63517- file_ops->id.owner = mod;
63518-
63519- file_ops->enable = ftrace_enable_fops;
63520- file_ops->enable.owner = mod;
63521-
63522- file_ops->filter = ftrace_event_filter_fops;
63523- file_ops->filter.owner = mod;
63524-
63525- file_ops->format = ftrace_event_format_fops;
63526- file_ops->format.owner = mod;
63527+ pax_open_kernel();
63528+ *(void **)&mod->trace_id.owner = mod;
63529+ *(void **)&mod->trace_enable.owner = mod;
63530+ *(void **)&mod->trace_filter.owner = mod;
63531+ *(void **)&mod->trace_format.owner = mod;
63532+ pax_close_kernel();
63533
63534 list_add(&file_ops->list, &ftrace_module_file_list);
63535
63536@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
63537
63538 for_each_event(call, start, end) {
63539 __trace_add_event_call(*call, mod,
63540- &file_ops->id, &file_ops->enable,
63541- &file_ops->filter, &file_ops->format);
63542+ &mod->trace_id, &mod->trace_enable,
63543+ &mod->trace_filter, &mod->trace_format);
63544 }
63545 }
63546
63547diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
63548--- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
63549+++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
63550@@ -24,7 +24,7 @@ struct header_iter {
63551 static struct trace_array *mmio_trace_array;
63552 static bool overrun_detected;
63553 static unsigned long prev_overruns;
63554-static atomic_t dropped_count;
63555+static atomic_unchecked_t dropped_count;
63556
63557 static void mmio_reset_data(struct trace_array *tr)
63558 {
63559@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
63560
63561 static unsigned long count_overruns(struct trace_iterator *iter)
63562 {
63563- unsigned long cnt = atomic_xchg(&dropped_count, 0);
63564+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63565 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63566
63567 if (over > prev_overruns)
63568@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
63569 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63570 sizeof(*entry), 0, pc);
63571 if (!event) {
63572- atomic_inc(&dropped_count);
63573+ atomic_inc_unchecked(&dropped_count);
63574 return;
63575 }
63576 entry = ring_buffer_event_data(event);
63577@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
63578 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63579 sizeof(*entry), 0, pc);
63580 if (!event) {
63581- atomic_inc(&dropped_count);
63582+ atomic_inc_unchecked(&dropped_count);
63583 return;
63584 }
63585 entry = ring_buffer_event_data(event);
63586diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
63587--- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
63588+++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
63589@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
63590
63591 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63592 if (!IS_ERR(p)) {
63593- p = mangle_path(s->buffer + s->len, p, "\n");
63594+ p = mangle_path(s->buffer + s->len, p, "\n\\");
63595 if (p) {
63596 s->len = p - s->buffer;
63597 return 1;
63598diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
63599--- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
63600+++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
63601@@ -50,7 +50,7 @@ static inline void check_stack(void)
63602 return;
63603
63604 /* we do not handle interrupt stacks yet */
63605- if (!object_is_on_stack(&this_size))
63606+ if (!object_starts_on_stack(&this_size))
63607 return;
63608
63609 local_irq_save(flags);
63610diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
63611--- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
63612+++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
63613@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
63614 int cpu;
63615 pid_t pid;
63616 /* Can be inserted from interrupt or user context, need to be atomic */
63617- atomic_t inserted;
63618+ atomic_unchecked_t inserted;
63619 /*
63620 * Don't need to be atomic, works are serialized in a single workqueue thread
63621 * on a single CPU.
63622@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
63623 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63624 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63625 if (node->pid == wq_thread->pid) {
63626- atomic_inc(&node->inserted);
63627+ atomic_inc_unchecked(&node->inserted);
63628 goto found;
63629 }
63630 }
63631@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
63632 tsk = get_pid_task(pid, PIDTYPE_PID);
63633 if (tsk) {
63634 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63635- atomic_read(&cws->inserted), cws->executed,
63636+ atomic_read_unchecked(&cws->inserted), cws->executed,
63637 tsk->comm);
63638 put_task_struct(tsk);
63639 }
63640diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
63641--- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
63642+++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
63643@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
63644 return BUG_TRAP_TYPE_NONE;
63645
63646 bug = find_bug(bugaddr);
63647+ if (!bug)
63648+ return BUG_TRAP_TYPE_NONE;
63649
63650 file = NULL;
63651 line = 0;
63652diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
63653--- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
63654+++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
63655@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
63656 if (limit > 4)
63657 return;
63658
63659- is_on_stack = object_is_on_stack(addr);
63660+ is_on_stack = object_starts_on_stack(addr);
63661 if (is_on_stack == onstack)
63662 return;
63663
63664diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
63665--- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
63666+++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
63667@@ -870,7 +870,7 @@ out:
63668
63669 static void check_for_stack(struct device *dev, void *addr)
63670 {
63671- if (object_is_on_stack(addr))
63672+ if (object_starts_on_stack(addr))
63673 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63674 "stack [addr=%p]\n", addr);
63675 }
63676diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
63677--- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
63678+++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
63679@@ -13,6 +13,7 @@
63680 #include <linux/init.h>
63681 #include <linux/sort.h>
63682 #include <asm/uaccess.h>
63683+#include <asm/pgtable.h>
63684
63685 #ifndef ARCH_HAS_SORT_EXTABLE
63686 /*
63687@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
63688 void sort_extable(struct exception_table_entry *start,
63689 struct exception_table_entry *finish)
63690 {
63691+ pax_open_kernel();
63692 sort(start, finish - start, sizeof(struct exception_table_entry),
63693 cmp_ex, NULL);
63694+ pax_close_kernel();
63695 }
63696
63697 #ifdef CONFIG_MODULES
63698diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
63699--- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
63700+++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
63701@@ -269,7 +269,7 @@ static void free(void *where)
63702 malloc_ptr = free_mem_ptr;
63703 }
63704 #else
63705-#define malloc(a) kmalloc(a, GFP_KERNEL)
63706+#define malloc(a) kmalloc((a), GFP_KERNEL)
63707 #define free(a) kfree(a)
63708 #endif
63709
63710diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
63711--- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
63712+++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
63713@@ -1088,6 +1088,7 @@ config LATENCYTOP
63714 depends on DEBUG_KERNEL
63715 depends on STACKTRACE_SUPPORT
63716 depends on PROC_FS
63717+ depends on !GRKERNSEC_HIDESYM
63718 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
63719 select KALLSYMS
63720 select KALLSYMS_ALL
63721diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
63722--- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
63723+++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
63724@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
63725 */
63726 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63727 {
63728- WARN_ON(release == NULL);
63729+ BUG_ON(release == NULL);
63730 WARN_ON(release == (void (*)(struct kref *))kfree);
63731
63732 if (atomic_dec_and_test(&kref->refcount)) {
63733diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
63734--- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
63735+++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
63736@@ -80,7 +80,7 @@ struct radix_tree_preload {
63737 int nr;
63738 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63739 };
63740-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63741+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63742
63743 static inline void *ptr_to_indirect(void *ptr)
63744 {
63745diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
63746--- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
63747+++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
63748@@ -16,6 +16,9 @@
63749 * - scnprintf and vscnprintf
63750 */
63751
63752+#ifdef CONFIG_GRKERNSEC_HIDESYM
63753+#define __INCLUDED_BY_HIDESYM 1
63754+#endif
63755 #include <stdarg.h>
63756 #include <linux/module.h>
63757 #include <linux/types.h>
63758@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
63759 char sym[KSYM_SYMBOL_LEN];
63760 if (ext == 'B')
63761 sprint_backtrace(sym, value);
63762- else if (ext != 'f' && ext != 's')
63763+ else if (ext != 'f' && ext != 's' && ext != 'a')
63764 sprint_symbol(sym, value);
63765 else
63766 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63767@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
63768 return string(buf, end, uuid, spec);
63769 }
63770
63771+#ifdef CONFIG_GRKERNSEC_HIDESYM
63772+int kptr_restrict __read_mostly = 2;
63773+#else
63774 int kptr_restrict __read_mostly;
63775+#endif
63776
63777 /*
63778 * Show a '%p' thing. A kernel extension is that the '%p' is followed
63779@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
63780 * - 'S' For symbolic direct pointers with offset
63781 * - 's' For symbolic direct pointers without offset
63782 * - 'B' For backtraced symbolic direct pointers with offset
63783+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63784+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63785 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
63786 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
63787 * - 'M' For a 6-byte MAC address, it prints the address in the
63788@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
63789 {
63790 if (!ptr && *fmt != 'K') {
63791 /*
63792- * Print (null) with the same width as a pointer so it makes
63793+ * Print (nil) with the same width as a pointer so it makes
63794 * tabular output look nice.
63795 */
63796 if (spec.field_width == -1)
63797 spec.field_width = 2 * sizeof(void *);
63798- return string(buf, end, "(null)", spec);
63799+ return string(buf, end, "(nil)", spec);
63800 }
63801
63802 switch (*fmt) {
63803@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
63804 /* Fallthrough */
63805 case 'S':
63806 case 's':
63807+#ifdef CONFIG_GRKERNSEC_HIDESYM
63808+ break;
63809+#else
63810+ return symbol_string(buf, end, ptr, spec, *fmt);
63811+#endif
63812+ case 'A':
63813+ case 'a':
63814 case 'B':
63815 return symbol_string(buf, end, ptr, spec, *fmt);
63816 case 'R':
63817@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
63818 typeof(type) value; \
63819 if (sizeof(type) == 8) { \
63820 args = PTR_ALIGN(args, sizeof(u32)); \
63821- *(u32 *)&value = *(u32 *)args; \
63822- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63823+ *(u32 *)&value = *(const u32 *)args; \
63824+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63825 } else { \
63826 args = PTR_ALIGN(args, sizeof(type)); \
63827- value = *(typeof(type) *)args; \
63828+ value = *(const typeof(type) *)args; \
63829 } \
63830 args += sizeof(type); \
63831 value; \
63832@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
63833 case FORMAT_TYPE_STR: {
63834 const char *str_arg = args;
63835 args += strlen(str_arg) + 1;
63836- str = string(str, end, (char *)str_arg, spec);
63837+ str = string(str, end, str_arg, spec);
63838 break;
63839 }
63840
63841diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
63842--- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63843+++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
63844@@ -0,0 +1 @@
63845+-grsec
63846diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
63847--- linux-3.0.4/Makefile 2011-09-02 18:11:26.000000000 -0400
63848+++ linux-3.0.4/Makefile 2011-09-17 00:56:07.000000000 -0400
63849@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63850
63851 HOSTCC = gcc
63852 HOSTCXX = g++
63853-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63854-HOSTCXXFLAGS = -O2
63855+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63856+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63857+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63858
63859 # Decide whether to build built-in, modular, or both.
63860 # Normally, just do built-in.
63861@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
63862 KBUILD_CPPFLAGS := -D__KERNEL__
63863
63864 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63865+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
63866 -fno-strict-aliasing -fno-common \
63867 -Werror-implicit-function-declaration \
63868 -Wno-format-security \
63869 -fno-delete-null-pointer-checks
63870+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63871 KBUILD_AFLAGS_KERNEL :=
63872 KBUILD_CFLAGS_KERNEL :=
63873 KBUILD_AFLAGS := -D__ASSEMBLY__
63874@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
63875 # Rules shared between *config targets and build targets
63876
63877 # Basic helpers built in scripts/
63878-PHONY += scripts_basic
63879-scripts_basic:
63880+PHONY += scripts_basic gcc-plugins
63881+scripts_basic: gcc-plugins
63882 $(Q)$(MAKE) $(build)=scripts/basic
63883 $(Q)rm -f .tmp_quiet_recordmcount
63884
63885@@ -564,6 +567,31 @@ else
63886 KBUILD_CFLAGS += -O2
63887 endif
63888
63889+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
63890+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
63891+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
63892+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
63893+endif
63894+ifdef CONFIG_KALLOCSTAT_PLUGIN
63895+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
63896+endif
63897+ifdef CONFIG_PAX_MEMORY_STACKLEAK
63898+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
63899+endif
63900+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN)
63901+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN
63902+gcc-plugins:
63903+ $(Q)$(MAKE) $(build)=tools/gcc
63904+else
63905+gcc-plugins:
63906+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
63907+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
63908+else
63909+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
63910+endif
63911+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
63912+endif
63913+
63914 include $(srctree)/arch/$(SRCARCH)/Makefile
63915
63916 ifneq ($(CONFIG_FRAME_WARN),0)
63917@@ -708,7 +736,7 @@ export mod_strip_cmd
63918
63919
63920 ifeq ($(KBUILD_EXTMOD),)
63921-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63922+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63923
63924 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63925 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63926@@ -907,6 +935,8 @@ define rule_vmlinux-modpost
63927 endef
63928
63929 # vmlinux image - including updated kernel symbols
63930+$(vmlinux-all): KBUILD_CFLAGS += $(GCC_PLUGINS)
63931+$(vmlinux-all): gcc-plugins
63932 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
63933 ifdef CONFIG_HEADERS_CHECK
63934 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
63935@@ -941,7 +971,8 @@ $(sort $(vmlinux-init) $(vmlinux-main))
63936 # Error messages still appears in the original language
63937
63938 PHONY += $(vmlinux-dirs)
63939-$(vmlinux-dirs): prepare scripts
63940+$(vmlinux-dirs): KBUILD_CFLAGS += $(GCC_PLUGINS)
63941+$(vmlinux-dirs): gcc-plugins prepare scripts
63942 $(Q)$(MAKE) $(build)=$@
63943
63944 # Store (new) KERNELRELASE string in include/config/kernel.release
63945@@ -986,6 +1017,7 @@ prepare0: archprepare FORCE
63946 $(Q)$(MAKE) $(build)=. missing-syscalls
63947
63948 # All the preparing..
63949+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
63950 prepare: prepare0
63951
63952 # Generate some files
63953@@ -1102,7 +1134,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
63954
63955 # Target to prepare building external modules
63956 PHONY += modules_prepare
63957-modules_prepare: prepare scripts
63958+modules_prepare: gcc-plugins prepare scripts
63959
63960 # Target to install modules
63961 PHONY += modules_install
63962@@ -1198,7 +1230,7 @@ distclean: mrproper
63963 @find $(srctree) $(RCS_FIND_IGNORE) \
63964 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
63965 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
63966- -o -name '.*.rej' -o -size 0 \
63967+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
63968 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
63969 -type f -print | xargs rm -f
63970
63971@@ -1359,6 +1391,7 @@ PHONY += $(module-dirs) modules
63972 $(module-dirs): crmodverdir $(objtree)/Module.symvers
63973 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
63974
63975+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
63976 modules: $(module-dirs)
63977 @$(kecho) ' Building modules, stage 2.';
63978 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63979@@ -1485,17 +1518,19 @@ else
63980 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
63981 endif
63982
63983-%.s: %.c prepare scripts FORCE
63984+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
63985+%.s: %.c gcc-plugins prepare scripts FORCE
63986 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63987 %.i: %.c prepare scripts FORCE
63988 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63989-%.o: %.c prepare scripts FORCE
63990+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
63991+%.o: %.c gcc-plugins prepare scripts FORCE
63992 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63993 %.lst: %.c prepare scripts FORCE
63994 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63995-%.s: %.S prepare scripts FORCE
63996+%.s: %.S gcc-plugins prepare scripts FORCE
63997 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63998-%.o: %.S prepare scripts FORCE
63999+%.o: %.S gcc-plugins prepare scripts FORCE
64000 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
64001 %.symtypes: %.c prepare scripts FORCE
64002 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
64003@@ -1505,11 +1540,13 @@ endif
64004 $(cmd_crmodverdir)
64005 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
64006 $(build)=$(build-dir)
64007-%/: prepare scripts FORCE
64008+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
64009+%/: gcc-plugins prepare scripts FORCE
64010 $(cmd_crmodverdir)
64011 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
64012 $(build)=$(build-dir)
64013-%.ko: prepare scripts FORCE
64014+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
64015+%.ko: gcc-plugins prepare scripts FORCE
64016 $(cmd_crmodverdir)
64017 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
64018 $(build)=$(build-dir) $(@:.ko=.o)
64019diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
64020--- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
64021+++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
64022@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
64023 struct address_space *mapping = file->f_mapping;
64024
64025 if (!mapping->a_ops->readpage)
64026- return -ENOEXEC;
64027+ return -ENODEV;
64028 file_accessed(file);
64029 vma->vm_ops = &generic_file_vm_ops;
64030 vma->vm_flags |= VM_CAN_NONLINEAR;
64031@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
64032 *pos = i_size_read(inode);
64033
64034 if (limit != RLIM_INFINITY) {
64035+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
64036 if (*pos >= limit) {
64037 send_sig(SIGXFSZ, current, 0);
64038 return -EFBIG;
64039diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
64040--- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
64041+++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
64042@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
64043 retry:
64044 vma = find_vma(mm, start);
64045
64046+#ifdef CONFIG_PAX_SEGMEXEC
64047+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
64048+ goto out;
64049+#endif
64050+
64051 /*
64052 * Make sure the vma is shared, that it supports prefaulting,
64053 * and that the remapped range is valid and fully within
64054diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
64055--- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
64056+++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
64057@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
64058 * So no dangers, even with speculative execution.
64059 */
64060 page = pte_page(pkmap_page_table[i]);
64061+ pax_open_kernel();
64062 pte_clear(&init_mm, (unsigned long)page_address(page),
64063 &pkmap_page_table[i]);
64064-
64065+ pax_close_kernel();
64066 set_page_address(page, NULL);
64067 need_flush = 1;
64068 }
64069@@ -186,9 +187,11 @@ start:
64070 }
64071 }
64072 vaddr = PKMAP_ADDR(last_pkmap_nr);
64073+
64074+ pax_open_kernel();
64075 set_pte_at(&init_mm, vaddr,
64076 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
64077-
64078+ pax_close_kernel();
64079 pkmap_count[last_pkmap_nr] = 1;
64080 set_page_address(page, (void *)vaddr);
64081
64082diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
64083--- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
64084+++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
64085@@ -702,7 +702,7 @@ out:
64086 * run pte_offset_map on the pmd, if an huge pmd could
64087 * materialize from under us from a different thread.
64088 */
64089- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
64090+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64091 return VM_FAULT_OOM;
64092 /* if an huge pmd materialized from under us just retry later */
64093 if (unlikely(pmd_trans_huge(*pmd)))
64094diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
64095--- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
64096+++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
64097@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
64098 return 1;
64099 }
64100
64101+#ifdef CONFIG_PAX_SEGMEXEC
64102+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
64103+{
64104+ struct mm_struct *mm = vma->vm_mm;
64105+ struct vm_area_struct *vma_m;
64106+ unsigned long address_m;
64107+ pte_t *ptep_m;
64108+
64109+ vma_m = pax_find_mirror_vma(vma);
64110+ if (!vma_m)
64111+ return;
64112+
64113+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64114+ address_m = address + SEGMEXEC_TASK_SIZE;
64115+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
64116+ get_page(page_m);
64117+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
64118+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
64119+}
64120+#endif
64121+
64122 /*
64123 * Hugetlb_cow() should be called with page lock of the original hugepage held.
64124 */
64125@@ -2440,6 +2461,11 @@ retry_avoidcopy:
64126 make_huge_pte(vma, new_page, 1));
64127 page_remove_rmap(old_page);
64128 hugepage_add_new_anon_rmap(new_page, vma, address);
64129+
64130+#ifdef CONFIG_PAX_SEGMEXEC
64131+ pax_mirror_huge_pte(vma, address, new_page);
64132+#endif
64133+
64134 /* Make the old page be freed below */
64135 new_page = old_page;
64136 mmu_notifier_invalidate_range_end(mm,
64137@@ -2591,6 +2617,10 @@ retry:
64138 && (vma->vm_flags & VM_SHARED)));
64139 set_huge_pte_at(mm, address, ptep, new_pte);
64140
64141+#ifdef CONFIG_PAX_SEGMEXEC
64142+ pax_mirror_huge_pte(vma, address, page);
64143+#endif
64144+
64145 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
64146 /* Optimization, do the COW without a second fault */
64147 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
64148@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
64149 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
64150 struct hstate *h = hstate_vma(vma);
64151
64152+#ifdef CONFIG_PAX_SEGMEXEC
64153+ struct vm_area_struct *vma_m;
64154+#endif
64155+
64156 ptep = huge_pte_offset(mm, address);
64157 if (ptep) {
64158 entry = huge_ptep_get(ptep);
64159@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
64160 VM_FAULT_SET_HINDEX(h - hstates);
64161 }
64162
64163+#ifdef CONFIG_PAX_SEGMEXEC
64164+ vma_m = pax_find_mirror_vma(vma);
64165+ if (vma_m) {
64166+ unsigned long address_m;
64167+
64168+ if (vma->vm_start > vma_m->vm_start) {
64169+ address_m = address;
64170+ address -= SEGMEXEC_TASK_SIZE;
64171+ vma = vma_m;
64172+ h = hstate_vma(vma);
64173+ } else
64174+ address_m = address + SEGMEXEC_TASK_SIZE;
64175+
64176+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
64177+ return VM_FAULT_OOM;
64178+ address_m &= HPAGE_MASK;
64179+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
64180+ }
64181+#endif
64182+
64183 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
64184 if (!ptep)
64185 return VM_FAULT_OOM;
64186diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
64187--- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
64188+++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
64189@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
64190 * in mm/page_alloc.c
64191 */
64192 extern void __free_pages_bootmem(struct page *page, unsigned int order);
64193+extern void free_compound_page(struct page *page);
64194 extern void prep_compound_page(struct page *page, unsigned long order);
64195 #ifdef CONFIG_MEMORY_FAILURE
64196 extern bool is_free_buddy_page(struct page *page);
64197diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
64198--- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
64199+++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
64200@@ -240,7 +240,7 @@ config KSM
64201 config DEFAULT_MMAP_MIN_ADDR
64202 int "Low address space to protect from user allocation"
64203 depends on MMU
64204- default 4096
64205+ default 65536
64206 help
64207 This is the portion of low virtual memory which should be protected
64208 from userspace allocation. Keeping a user from writing to low pages
64209diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
64210--- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
64211+++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
64212@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
64213
64214 for (i = 0; i < object->trace_len; i++) {
64215 void *ptr = (void *)object->trace[i];
64216- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
64217+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
64218 }
64219 }
64220
64221diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
64222--- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
64223+++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
64224@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
64225 pgoff_t pgoff;
64226 unsigned long new_flags = vma->vm_flags;
64227
64228+#ifdef CONFIG_PAX_SEGMEXEC
64229+ struct vm_area_struct *vma_m;
64230+#endif
64231+
64232 switch (behavior) {
64233 case MADV_NORMAL:
64234 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
64235@@ -110,6 +114,13 @@ success:
64236 /*
64237 * vm_flags is protected by the mmap_sem held in write mode.
64238 */
64239+
64240+#ifdef CONFIG_PAX_SEGMEXEC
64241+ vma_m = pax_find_mirror_vma(vma);
64242+ if (vma_m)
64243+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
64244+#endif
64245+
64246 vma->vm_flags = new_flags;
64247
64248 out:
64249@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
64250 struct vm_area_struct ** prev,
64251 unsigned long start, unsigned long end)
64252 {
64253+
64254+#ifdef CONFIG_PAX_SEGMEXEC
64255+ struct vm_area_struct *vma_m;
64256+#endif
64257+
64258 *prev = vma;
64259 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
64260 return -EINVAL;
64261@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
64262 zap_page_range(vma, start, end - start, &details);
64263 } else
64264 zap_page_range(vma, start, end - start, NULL);
64265+
64266+#ifdef CONFIG_PAX_SEGMEXEC
64267+ vma_m = pax_find_mirror_vma(vma);
64268+ if (vma_m) {
64269+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
64270+ struct zap_details details = {
64271+ .nonlinear_vma = vma_m,
64272+ .last_index = ULONG_MAX,
64273+ };
64274+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
64275+ } else
64276+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
64277+ }
64278+#endif
64279+
64280 return 0;
64281 }
64282
64283@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
64284 if (end < start)
64285 goto out;
64286
64287+#ifdef CONFIG_PAX_SEGMEXEC
64288+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
64289+ if (end > SEGMEXEC_TASK_SIZE)
64290+ goto out;
64291+ } else
64292+#endif
64293+
64294+ if (end > TASK_SIZE)
64295+ goto out;
64296+
64297 error = 0;
64298 if (end == start)
64299 goto out;
64300diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
64301--- linux-3.0.4/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
64302+++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
64303@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
64304 return;
64305
64306 pmd = pmd_offset(pud, start);
64307+
64308+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
64309 pud_clear(pud);
64310 pmd_free_tlb(tlb, pmd, start);
64311+#endif
64312+
64313 }
64314
64315 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
64316@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
64317 if (end - 1 > ceiling - 1)
64318 return;
64319
64320+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
64321 pud = pud_offset(pgd, start);
64322 pgd_clear(pgd);
64323 pud_free_tlb(tlb, pud, start);
64324+#endif
64325+
64326 }
64327
64328 /*
64329@@ -1577,12 +1584,6 @@ no_page_table:
64330 return page;
64331 }
64332
64333-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64334-{
64335- return stack_guard_page_start(vma, addr) ||
64336- stack_guard_page_end(vma, addr+PAGE_SIZE);
64337-}
64338-
64339 /**
64340 * __get_user_pages() - pin user pages in memory
64341 * @tsk: task_struct of target task
64342@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
64343 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
64344 i = 0;
64345
64346- do {
64347+ while (nr_pages) {
64348 struct vm_area_struct *vma;
64349
64350- vma = find_extend_vma(mm, start);
64351+ vma = find_vma(mm, start);
64352 if (!vma && in_gate_area(mm, start)) {
64353 unsigned long pg = start & PAGE_MASK;
64354 pgd_t *pgd;
64355@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
64356 goto next_page;
64357 }
64358
64359- if (!vma ||
64360+ if (!vma || start < vma->vm_start ||
64361 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
64362 !(vm_flags & vma->vm_flags))
64363 return i ? : -EFAULT;
64364@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
64365 int ret;
64366 unsigned int fault_flags = 0;
64367
64368- /* For mlock, just skip the stack guard page. */
64369- if (foll_flags & FOLL_MLOCK) {
64370- if (stack_guard_page(vma, start))
64371- goto next_page;
64372- }
64373 if (foll_flags & FOLL_WRITE)
64374 fault_flags |= FAULT_FLAG_WRITE;
64375 if (nonblocking)
64376@@ -1811,7 +1807,7 @@ next_page:
64377 start += PAGE_SIZE;
64378 nr_pages--;
64379 } while (nr_pages && start < vma->vm_end);
64380- } while (nr_pages);
64381+ }
64382 return i;
64383 }
64384 EXPORT_SYMBOL(__get_user_pages);
64385@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
64386 page_add_file_rmap(page);
64387 set_pte_at(mm, addr, pte, mk_pte(page, prot));
64388
64389+#ifdef CONFIG_PAX_SEGMEXEC
64390+ pax_mirror_file_pte(vma, addr, page, ptl);
64391+#endif
64392+
64393 retval = 0;
64394 pte_unmap_unlock(pte, ptl);
64395 return retval;
64396@@ -2052,10 +2052,22 @@ out:
64397 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
64398 struct page *page)
64399 {
64400+
64401+#ifdef CONFIG_PAX_SEGMEXEC
64402+ struct vm_area_struct *vma_m;
64403+#endif
64404+
64405 if (addr < vma->vm_start || addr >= vma->vm_end)
64406 return -EFAULT;
64407 if (!page_count(page))
64408 return -EINVAL;
64409+
64410+#ifdef CONFIG_PAX_SEGMEXEC
64411+ vma_m = pax_find_mirror_vma(vma);
64412+ if (vma_m)
64413+ vma_m->vm_flags |= VM_INSERTPAGE;
64414+#endif
64415+
64416 vma->vm_flags |= VM_INSERTPAGE;
64417 return insert_page(vma, addr, page, vma->vm_page_prot);
64418 }
64419@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
64420 unsigned long pfn)
64421 {
64422 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
64423+ BUG_ON(vma->vm_mirror);
64424
64425 if (addr < vma->vm_start || addr >= vma->vm_end)
64426 return -EFAULT;
64427@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
64428 copy_user_highpage(dst, src, va, vma);
64429 }
64430
64431+#ifdef CONFIG_PAX_SEGMEXEC
64432+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
64433+{
64434+ struct mm_struct *mm = vma->vm_mm;
64435+ spinlock_t *ptl;
64436+ pte_t *pte, entry;
64437+
64438+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
64439+ entry = *pte;
64440+ if (!pte_present(entry)) {
64441+ if (!pte_none(entry)) {
64442+ BUG_ON(pte_file(entry));
64443+ free_swap_and_cache(pte_to_swp_entry(entry));
64444+ pte_clear_not_present_full(mm, address, pte, 0);
64445+ }
64446+ } else {
64447+ struct page *page;
64448+
64449+ flush_cache_page(vma, address, pte_pfn(entry));
64450+ entry = ptep_clear_flush(vma, address, pte);
64451+ BUG_ON(pte_dirty(entry));
64452+ page = vm_normal_page(vma, address, entry);
64453+ if (page) {
64454+ update_hiwater_rss(mm);
64455+ if (PageAnon(page))
64456+ dec_mm_counter_fast(mm, MM_ANONPAGES);
64457+ else
64458+ dec_mm_counter_fast(mm, MM_FILEPAGES);
64459+ page_remove_rmap(page);
64460+ page_cache_release(page);
64461+ }
64462+ }
64463+ pte_unmap_unlock(pte, ptl);
64464+}
64465+
64466+/* PaX: if vma is mirrored, synchronize the mirror's PTE
64467+ *
64468+ * the ptl of the lower mapped page is held on entry and is not released on exit
64469+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
64470+ */
64471+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64472+{
64473+ struct mm_struct *mm = vma->vm_mm;
64474+ unsigned long address_m;
64475+ spinlock_t *ptl_m;
64476+ struct vm_area_struct *vma_m;
64477+ pmd_t *pmd_m;
64478+ pte_t *pte_m, entry_m;
64479+
64480+ BUG_ON(!page_m || !PageAnon(page_m));
64481+
64482+ vma_m = pax_find_mirror_vma(vma);
64483+ if (!vma_m)
64484+ return;
64485+
64486+ BUG_ON(!PageLocked(page_m));
64487+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64488+ address_m = address + SEGMEXEC_TASK_SIZE;
64489+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64490+ pte_m = pte_offset_map(pmd_m, address_m);
64491+ ptl_m = pte_lockptr(mm, pmd_m);
64492+ if (ptl != ptl_m) {
64493+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64494+ if (!pte_none(*pte_m))
64495+ goto out;
64496+ }
64497+
64498+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64499+ page_cache_get(page_m);
64500+ page_add_anon_rmap(page_m, vma_m, address_m);
64501+ inc_mm_counter_fast(mm, MM_ANONPAGES);
64502+ set_pte_at(mm, address_m, pte_m, entry_m);
64503+ update_mmu_cache(vma_m, address_m, entry_m);
64504+out:
64505+ if (ptl != ptl_m)
64506+ spin_unlock(ptl_m);
64507+ pte_unmap(pte_m);
64508+ unlock_page(page_m);
64509+}
64510+
64511+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64512+{
64513+ struct mm_struct *mm = vma->vm_mm;
64514+ unsigned long address_m;
64515+ spinlock_t *ptl_m;
64516+ struct vm_area_struct *vma_m;
64517+ pmd_t *pmd_m;
64518+ pte_t *pte_m, entry_m;
64519+
64520+ BUG_ON(!page_m || PageAnon(page_m));
64521+
64522+ vma_m = pax_find_mirror_vma(vma);
64523+ if (!vma_m)
64524+ return;
64525+
64526+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64527+ address_m = address + SEGMEXEC_TASK_SIZE;
64528+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64529+ pte_m = pte_offset_map(pmd_m, address_m);
64530+ ptl_m = pte_lockptr(mm, pmd_m);
64531+ if (ptl != ptl_m) {
64532+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64533+ if (!pte_none(*pte_m))
64534+ goto out;
64535+ }
64536+
64537+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64538+ page_cache_get(page_m);
64539+ page_add_file_rmap(page_m);
64540+ inc_mm_counter_fast(mm, MM_FILEPAGES);
64541+ set_pte_at(mm, address_m, pte_m, entry_m);
64542+ update_mmu_cache(vma_m, address_m, entry_m);
64543+out:
64544+ if (ptl != ptl_m)
64545+ spin_unlock(ptl_m);
64546+ pte_unmap(pte_m);
64547+}
64548+
64549+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64550+{
64551+ struct mm_struct *mm = vma->vm_mm;
64552+ unsigned long address_m;
64553+ spinlock_t *ptl_m;
64554+ struct vm_area_struct *vma_m;
64555+ pmd_t *pmd_m;
64556+ pte_t *pte_m, entry_m;
64557+
64558+ vma_m = pax_find_mirror_vma(vma);
64559+ if (!vma_m)
64560+ return;
64561+
64562+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64563+ address_m = address + SEGMEXEC_TASK_SIZE;
64564+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64565+ pte_m = pte_offset_map(pmd_m, address_m);
64566+ ptl_m = pte_lockptr(mm, pmd_m);
64567+ if (ptl != ptl_m) {
64568+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64569+ if (!pte_none(*pte_m))
64570+ goto out;
64571+ }
64572+
64573+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64574+ set_pte_at(mm, address_m, pte_m, entry_m);
64575+out:
64576+ if (ptl != ptl_m)
64577+ spin_unlock(ptl_m);
64578+ pte_unmap(pte_m);
64579+}
64580+
64581+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64582+{
64583+ struct page *page_m;
64584+ pte_t entry;
64585+
64586+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64587+ goto out;
64588+
64589+ entry = *pte;
64590+ page_m = vm_normal_page(vma, address, entry);
64591+ if (!page_m)
64592+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64593+ else if (PageAnon(page_m)) {
64594+ if (pax_find_mirror_vma(vma)) {
64595+ pte_unmap_unlock(pte, ptl);
64596+ lock_page(page_m);
64597+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64598+ if (pte_same(entry, *pte))
64599+ pax_mirror_anon_pte(vma, address, page_m, ptl);
64600+ else
64601+ unlock_page(page_m);
64602+ }
64603+ } else
64604+ pax_mirror_file_pte(vma, address, page_m, ptl);
64605+
64606+out:
64607+ pte_unmap_unlock(pte, ptl);
64608+}
64609+#endif
64610+
64611 /*
64612 * This routine handles present pages, when users try to write
64613 * to a shared page. It is done by copying the page to a new address
64614@@ -2667,6 +2860,12 @@ gotten:
64615 */
64616 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64617 if (likely(pte_same(*page_table, orig_pte))) {
64618+
64619+#ifdef CONFIG_PAX_SEGMEXEC
64620+ if (pax_find_mirror_vma(vma))
64621+ BUG_ON(!trylock_page(new_page));
64622+#endif
64623+
64624 if (old_page) {
64625 if (!PageAnon(old_page)) {
64626 dec_mm_counter_fast(mm, MM_FILEPAGES);
64627@@ -2718,6 +2917,10 @@ gotten:
64628 page_remove_rmap(old_page);
64629 }
64630
64631+#ifdef CONFIG_PAX_SEGMEXEC
64632+ pax_mirror_anon_pte(vma, address, new_page, ptl);
64633+#endif
64634+
64635 /* Free the old page.. */
64636 new_page = old_page;
64637 ret |= VM_FAULT_WRITE;
64638@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
64639 swap_free(entry);
64640 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64641 try_to_free_swap(page);
64642+
64643+#ifdef CONFIG_PAX_SEGMEXEC
64644+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64645+#endif
64646+
64647 unlock_page(page);
64648 if (swapcache) {
64649 /*
64650@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
64651
64652 /* No need to invalidate - it was non-present before */
64653 update_mmu_cache(vma, address, page_table);
64654+
64655+#ifdef CONFIG_PAX_SEGMEXEC
64656+ pax_mirror_anon_pte(vma, address, page, ptl);
64657+#endif
64658+
64659 unlock:
64660 pte_unmap_unlock(page_table, ptl);
64661 out:
64662@@ -3039,40 +3252,6 @@ out_release:
64663 }
64664
64665 /*
64666- * This is like a special single-page "expand_{down|up}wards()",
64667- * except we must first make sure that 'address{-|+}PAGE_SIZE'
64668- * doesn't hit another vma.
64669- */
64670-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64671-{
64672- address &= PAGE_MASK;
64673- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64674- struct vm_area_struct *prev = vma->vm_prev;
64675-
64676- /*
64677- * Is there a mapping abutting this one below?
64678- *
64679- * That's only ok if it's the same stack mapping
64680- * that has gotten split..
64681- */
64682- if (prev && prev->vm_end == address)
64683- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64684-
64685- expand_downwards(vma, address - PAGE_SIZE);
64686- }
64687- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64688- struct vm_area_struct *next = vma->vm_next;
64689-
64690- /* As VM_GROWSDOWN but s/below/above/ */
64691- if (next && next->vm_start == address + PAGE_SIZE)
64692- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64693-
64694- expand_upwards(vma, address + PAGE_SIZE);
64695- }
64696- return 0;
64697-}
64698-
64699-/*
64700 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64701 * but allow concurrent faults), and pte mapped but not yet locked.
64702 * We return with mmap_sem still held, but pte unmapped and unlocked.
64703@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
64704 unsigned long address, pte_t *page_table, pmd_t *pmd,
64705 unsigned int flags)
64706 {
64707- struct page *page;
64708+ struct page *page = NULL;
64709 spinlock_t *ptl;
64710 pte_t entry;
64711
64712- pte_unmap(page_table);
64713-
64714- /* Check if we need to add a guard page to the stack */
64715- if (check_stack_guard_page(vma, address) < 0)
64716- return VM_FAULT_SIGBUS;
64717-
64718- /* Use the zero-page for reads */
64719 if (!(flags & FAULT_FLAG_WRITE)) {
64720 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64721 vma->vm_page_prot));
64722- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64723+ ptl = pte_lockptr(mm, pmd);
64724+ spin_lock(ptl);
64725 if (!pte_none(*page_table))
64726 goto unlock;
64727 goto setpte;
64728 }
64729
64730 /* Allocate our own private page. */
64731+ pte_unmap(page_table);
64732+
64733 if (unlikely(anon_vma_prepare(vma)))
64734 goto oom;
64735 page = alloc_zeroed_user_highpage_movable(vma, address);
64736@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
64737 if (!pte_none(*page_table))
64738 goto release;
64739
64740+#ifdef CONFIG_PAX_SEGMEXEC
64741+ if (pax_find_mirror_vma(vma))
64742+ BUG_ON(!trylock_page(page));
64743+#endif
64744+
64745 inc_mm_counter_fast(mm, MM_ANONPAGES);
64746 page_add_new_anon_rmap(page, vma, address);
64747 setpte:
64748@@ -3127,6 +3307,12 @@ setpte:
64749
64750 /* No need to invalidate - it was non-present before */
64751 update_mmu_cache(vma, address, page_table);
64752+
64753+#ifdef CONFIG_PAX_SEGMEXEC
64754+ if (page)
64755+ pax_mirror_anon_pte(vma, address, page, ptl);
64756+#endif
64757+
64758 unlock:
64759 pte_unmap_unlock(page_table, ptl);
64760 return 0;
64761@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
64762 */
64763 /* Only go through if we didn't race with anybody else... */
64764 if (likely(pte_same(*page_table, orig_pte))) {
64765+
64766+#ifdef CONFIG_PAX_SEGMEXEC
64767+ if (anon && pax_find_mirror_vma(vma))
64768+ BUG_ON(!trylock_page(page));
64769+#endif
64770+
64771 flush_icache_page(vma, page);
64772 entry = mk_pte(page, vma->vm_page_prot);
64773 if (flags & FAULT_FLAG_WRITE)
64774@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
64775
64776 /* no need to invalidate: a not-present page won't be cached */
64777 update_mmu_cache(vma, address, page_table);
64778+
64779+#ifdef CONFIG_PAX_SEGMEXEC
64780+ if (anon)
64781+ pax_mirror_anon_pte(vma, address, page, ptl);
64782+ else
64783+ pax_mirror_file_pte(vma, address, page, ptl);
64784+#endif
64785+
64786 } else {
64787 if (charged)
64788 mem_cgroup_uncharge_page(page);
64789@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
64790 if (flags & FAULT_FLAG_WRITE)
64791 flush_tlb_fix_spurious_fault(vma, address);
64792 }
64793+
64794+#ifdef CONFIG_PAX_SEGMEXEC
64795+ pax_mirror_pte(vma, address, pte, pmd, ptl);
64796+ return 0;
64797+#endif
64798+
64799 unlock:
64800 pte_unmap_unlock(pte, ptl);
64801 return 0;
64802@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
64803 pmd_t *pmd;
64804 pte_t *pte;
64805
64806+#ifdef CONFIG_PAX_SEGMEXEC
64807+ struct vm_area_struct *vma_m;
64808+#endif
64809+
64810 __set_current_state(TASK_RUNNING);
64811
64812 count_vm_event(PGFAULT);
64813@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
64814 if (unlikely(is_vm_hugetlb_page(vma)))
64815 return hugetlb_fault(mm, vma, address, flags);
64816
64817+#ifdef CONFIG_PAX_SEGMEXEC
64818+ vma_m = pax_find_mirror_vma(vma);
64819+ if (vma_m) {
64820+ unsigned long address_m;
64821+ pgd_t *pgd_m;
64822+ pud_t *pud_m;
64823+ pmd_t *pmd_m;
64824+
64825+ if (vma->vm_start > vma_m->vm_start) {
64826+ address_m = address;
64827+ address -= SEGMEXEC_TASK_SIZE;
64828+ vma = vma_m;
64829+ } else
64830+ address_m = address + SEGMEXEC_TASK_SIZE;
64831+
64832+ pgd_m = pgd_offset(mm, address_m);
64833+ pud_m = pud_alloc(mm, pgd_m, address_m);
64834+ if (!pud_m)
64835+ return VM_FAULT_OOM;
64836+ pmd_m = pmd_alloc(mm, pud_m, address_m);
64837+ if (!pmd_m)
64838+ return VM_FAULT_OOM;
64839+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
64840+ return VM_FAULT_OOM;
64841+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64842+ }
64843+#endif
64844+
64845 pgd = pgd_offset(mm, address);
64846 pud = pud_alloc(mm, pgd, address);
64847 if (!pud)
64848@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
64849 * run pte_offset_map on the pmd, if an huge pmd could
64850 * materialize from under us from a different thread.
64851 */
64852- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
64853+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64854 return VM_FAULT_OOM;
64855 /* if an huge pmd materialized from under us just retry later */
64856 if (unlikely(pmd_trans_huge(*pmd)))
64857@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
64858 gate_vma.vm_start = FIXADDR_USER_START;
64859 gate_vma.vm_end = FIXADDR_USER_END;
64860 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64861- gate_vma.vm_page_prot = __P101;
64862+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64863 /*
64864 * Make sure the vDSO gets into every core dump.
64865 * Dumping its contents makes post-mortem fully interpretable later
64866diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
64867--- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
64868+++ linux-3.0.4/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
64869@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
64870
64871 int sysctl_memory_failure_recovery __read_mostly = 1;
64872
64873-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64874+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64875
64876 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
64877
64878@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
64879 }
64880
64881 nr_pages = 1 << compound_trans_order(hpage);
64882- atomic_long_add(nr_pages, &mce_bad_pages);
64883+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
64884
64885 /*
64886 * We need/can do nothing about count=0 pages.
64887@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
64888 if (!PageHWPoison(hpage)
64889 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
64890 || (p != hpage && TestSetPageHWPoison(hpage))) {
64891- atomic_long_sub(nr_pages, &mce_bad_pages);
64892+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64893 return 0;
64894 }
64895 set_page_hwpoison_huge_page(hpage);
64896@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
64897 }
64898 if (hwpoison_filter(p)) {
64899 if (TestClearPageHWPoison(p))
64900- atomic_long_sub(nr_pages, &mce_bad_pages);
64901+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64902 unlock_page(hpage);
64903 put_page(hpage);
64904 return 0;
64905@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
64906 return 0;
64907 }
64908 if (TestClearPageHWPoison(p))
64909- atomic_long_sub(nr_pages, &mce_bad_pages);
64910+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64911 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
64912 return 0;
64913 }
64914@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
64915 */
64916 if (TestClearPageHWPoison(page)) {
64917 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
64918- atomic_long_sub(nr_pages, &mce_bad_pages);
64919+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64920 freeit = 1;
64921 if (PageHuge(page))
64922 clear_page_hwpoison_huge_page(page);
64923@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
64924 }
64925 done:
64926 if (!PageHWPoison(hpage))
64927- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
64928+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
64929 set_page_hwpoison_huge_page(hpage);
64930 dequeue_hwpoisoned_huge_page(hpage);
64931 /* keep elevated page count for bad page */
64932@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
64933 return ret;
64934
64935 done:
64936- atomic_long_add(1, &mce_bad_pages);
64937+ atomic_long_add_unchecked(1, &mce_bad_pages);
64938 SetPageHWPoison(page);
64939 /* keep elevated page count for bad page */
64940 return ret;
64941diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
64942--- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
64943+++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
64944@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
64945 unsigned long vmstart;
64946 unsigned long vmend;
64947
64948+#ifdef CONFIG_PAX_SEGMEXEC
64949+ struct vm_area_struct *vma_m;
64950+#endif
64951+
64952 vma = find_vma_prev(mm, start, &prev);
64953 if (!vma || vma->vm_start > start)
64954 return -EFAULT;
64955@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
64956 err = policy_vma(vma, new_pol);
64957 if (err)
64958 goto out;
64959+
64960+#ifdef CONFIG_PAX_SEGMEXEC
64961+ vma_m = pax_find_mirror_vma(vma);
64962+ if (vma_m) {
64963+ err = policy_vma(vma_m, new_pol);
64964+ if (err)
64965+ goto out;
64966+ }
64967+#endif
64968+
64969 }
64970
64971 out:
64972@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
64973
64974 if (end < start)
64975 return -EINVAL;
64976+
64977+#ifdef CONFIG_PAX_SEGMEXEC
64978+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64979+ if (end > SEGMEXEC_TASK_SIZE)
64980+ return -EINVAL;
64981+ } else
64982+#endif
64983+
64984+ if (end > TASK_SIZE)
64985+ return -EINVAL;
64986+
64987 if (end == start)
64988 return 0;
64989
64990@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64991 if (!mm)
64992 goto out;
64993
64994+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64995+ if (mm != current->mm &&
64996+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64997+ err = -EPERM;
64998+ goto out;
64999+ }
65000+#endif
65001+
65002 /*
65003 * Check if this process has the right to modify the specified
65004 * process. The right exists if the process has administrative
65005@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
65006 rcu_read_lock();
65007 tcred = __task_cred(task);
65008 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
65009- cred->uid != tcred->suid && cred->uid != tcred->uid &&
65010- !capable(CAP_SYS_NICE)) {
65011+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
65012 rcu_read_unlock();
65013 err = -EPERM;
65014 goto out;
65015diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
65016--- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
65017+++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
65018@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
65019 unsigned long chunk_start;
65020 int err;
65021
65022+ pax_track_stack();
65023+
65024 task_nodes = cpuset_mems_allowed(task);
65025
65026 err = -ENOMEM;
65027@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
65028 if (!mm)
65029 return -EINVAL;
65030
65031+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65032+ if (mm != current->mm &&
65033+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
65034+ err = -EPERM;
65035+ goto out;
65036+ }
65037+#endif
65038+
65039 /*
65040 * Check if this process has the right to modify the specified
65041 * process. The right exists if the process has administrative
65042@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
65043 rcu_read_lock();
65044 tcred = __task_cred(task);
65045 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
65046- cred->uid != tcred->suid && cred->uid != tcred->uid &&
65047- !capable(CAP_SYS_NICE)) {
65048+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
65049 rcu_read_unlock();
65050 err = -EPERM;
65051 goto out;
65052diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
65053--- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
65054+++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
65055@@ -13,6 +13,7 @@
65056 #include <linux/pagemap.h>
65057 #include <linux/mempolicy.h>
65058 #include <linux/syscalls.h>
65059+#include <linux/security.h>
65060 #include <linux/sched.h>
65061 #include <linux/module.h>
65062 #include <linux/rmap.h>
65063@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
65064 return -EINVAL;
65065 if (end == start)
65066 return 0;
65067+ if (end > TASK_SIZE)
65068+ return -EINVAL;
65069+
65070 vma = find_vma_prev(current->mm, start, &prev);
65071 if (!vma || vma->vm_start > start)
65072 return -ENOMEM;
65073@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
65074 for (nstart = start ; ; ) {
65075 vm_flags_t newflags;
65076
65077+#ifdef CONFIG_PAX_SEGMEXEC
65078+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65079+ break;
65080+#endif
65081+
65082 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
65083
65084 newflags = vma->vm_flags | VM_LOCKED;
65085@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
65086 lock_limit >>= PAGE_SHIFT;
65087
65088 /* check against resource limits */
65089+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
65090 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
65091 error = do_mlock(start, len, 1);
65092 up_write(&current->mm->mmap_sem);
65093@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
65094 static int do_mlockall(int flags)
65095 {
65096 struct vm_area_struct * vma, * prev = NULL;
65097- unsigned int def_flags = 0;
65098
65099 if (flags & MCL_FUTURE)
65100- def_flags = VM_LOCKED;
65101- current->mm->def_flags = def_flags;
65102+ current->mm->def_flags |= VM_LOCKED;
65103+ else
65104+ current->mm->def_flags &= ~VM_LOCKED;
65105 if (flags == MCL_FUTURE)
65106 goto out;
65107
65108 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
65109 vm_flags_t newflags;
65110
65111+#ifdef CONFIG_PAX_SEGMEXEC
65112+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65113+ break;
65114+#endif
65115+
65116+ BUG_ON(vma->vm_end > TASK_SIZE);
65117 newflags = vma->vm_flags | VM_LOCKED;
65118 if (!(flags & MCL_CURRENT))
65119 newflags &= ~VM_LOCKED;
65120@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
65121 lock_limit >>= PAGE_SHIFT;
65122
65123 ret = -ENOMEM;
65124+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
65125 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
65126 capable(CAP_IPC_LOCK))
65127 ret = do_mlockall(flags);
65128diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
65129--- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
65130+++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
65131@@ -46,6 +46,16 @@
65132 #define arch_rebalance_pgtables(addr, len) (addr)
65133 #endif
65134
65135+static inline void verify_mm_writelocked(struct mm_struct *mm)
65136+{
65137+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
65138+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65139+ up_read(&mm->mmap_sem);
65140+ BUG();
65141+ }
65142+#endif
65143+}
65144+
65145 static void unmap_region(struct mm_struct *mm,
65146 struct vm_area_struct *vma, struct vm_area_struct *prev,
65147 unsigned long start, unsigned long end);
65148@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
65149 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
65150 *
65151 */
65152-pgprot_t protection_map[16] = {
65153+pgprot_t protection_map[16] __read_only = {
65154 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
65155 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
65156 };
65157
65158-pgprot_t vm_get_page_prot(unsigned long vm_flags)
65159+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
65160 {
65161- return __pgprot(pgprot_val(protection_map[vm_flags &
65162+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
65163 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
65164 pgprot_val(arch_vm_get_page_prot(vm_flags)));
65165+
65166+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65167+ if (!(__supported_pte_mask & _PAGE_NX) &&
65168+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
65169+ (vm_flags & (VM_READ | VM_WRITE)))
65170+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
65171+#endif
65172+
65173+ return prot;
65174 }
65175 EXPORT_SYMBOL(vm_get_page_prot);
65176
65177 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
65178 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
65179 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
65180+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
65181 /*
65182 * Make sure vm_committed_as in one cacheline and not cacheline shared with
65183 * other variables. It can be updated by several CPUs frequently.
65184@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
65185 struct vm_area_struct *next = vma->vm_next;
65186
65187 might_sleep();
65188+ BUG_ON(vma->vm_mirror);
65189 if (vma->vm_ops && vma->vm_ops->close)
65190 vma->vm_ops->close(vma);
65191 if (vma->vm_file) {
65192@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
65193 * not page aligned -Ram Gupta
65194 */
65195 rlim = rlimit(RLIMIT_DATA);
65196+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
65197 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
65198 (mm->end_data - mm->start_data) > rlim)
65199 goto out;
65200@@ -697,6 +719,12 @@ static int
65201 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
65202 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65203 {
65204+
65205+#ifdef CONFIG_PAX_SEGMEXEC
65206+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
65207+ return 0;
65208+#endif
65209+
65210 if (is_mergeable_vma(vma, file, vm_flags) &&
65211 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
65212 if (vma->vm_pgoff == vm_pgoff)
65213@@ -716,6 +744,12 @@ static int
65214 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
65215 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65216 {
65217+
65218+#ifdef CONFIG_PAX_SEGMEXEC
65219+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
65220+ return 0;
65221+#endif
65222+
65223 if (is_mergeable_vma(vma, file, vm_flags) &&
65224 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
65225 pgoff_t vm_pglen;
65226@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
65227 struct vm_area_struct *vma_merge(struct mm_struct *mm,
65228 struct vm_area_struct *prev, unsigned long addr,
65229 unsigned long end, unsigned long vm_flags,
65230- struct anon_vma *anon_vma, struct file *file,
65231+ struct anon_vma *anon_vma, struct file *file,
65232 pgoff_t pgoff, struct mempolicy *policy)
65233 {
65234 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
65235 struct vm_area_struct *area, *next;
65236 int err;
65237
65238+#ifdef CONFIG_PAX_SEGMEXEC
65239+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
65240+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
65241+
65242+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
65243+#endif
65244+
65245 /*
65246 * We later require that vma->vm_flags == vm_flags,
65247 * so this tests vma->vm_flags & VM_SPECIAL, too.
65248@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
65249 if (next && next->vm_end == end) /* cases 6, 7, 8 */
65250 next = next->vm_next;
65251
65252+#ifdef CONFIG_PAX_SEGMEXEC
65253+ if (prev)
65254+ prev_m = pax_find_mirror_vma(prev);
65255+ if (area)
65256+ area_m = pax_find_mirror_vma(area);
65257+ if (next)
65258+ next_m = pax_find_mirror_vma(next);
65259+#endif
65260+
65261 /*
65262 * Can it merge with the predecessor?
65263 */
65264@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
65265 /* cases 1, 6 */
65266 err = vma_adjust(prev, prev->vm_start,
65267 next->vm_end, prev->vm_pgoff, NULL);
65268- } else /* cases 2, 5, 7 */
65269+
65270+#ifdef CONFIG_PAX_SEGMEXEC
65271+ if (!err && prev_m)
65272+ err = vma_adjust(prev_m, prev_m->vm_start,
65273+ next_m->vm_end, prev_m->vm_pgoff, NULL);
65274+#endif
65275+
65276+ } else { /* cases 2, 5, 7 */
65277 err = vma_adjust(prev, prev->vm_start,
65278 end, prev->vm_pgoff, NULL);
65279+
65280+#ifdef CONFIG_PAX_SEGMEXEC
65281+ if (!err && prev_m)
65282+ err = vma_adjust(prev_m, prev_m->vm_start,
65283+ end_m, prev_m->vm_pgoff, NULL);
65284+#endif
65285+
65286+ }
65287 if (err)
65288 return NULL;
65289 khugepaged_enter_vma_merge(prev);
65290@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
65291 mpol_equal(policy, vma_policy(next)) &&
65292 can_vma_merge_before(next, vm_flags,
65293 anon_vma, file, pgoff+pglen)) {
65294- if (prev && addr < prev->vm_end) /* case 4 */
65295+ if (prev && addr < prev->vm_end) { /* case 4 */
65296 err = vma_adjust(prev, prev->vm_start,
65297 addr, prev->vm_pgoff, NULL);
65298- else /* cases 3, 8 */
65299+
65300+#ifdef CONFIG_PAX_SEGMEXEC
65301+ if (!err && prev_m)
65302+ err = vma_adjust(prev_m, prev_m->vm_start,
65303+ addr_m, prev_m->vm_pgoff, NULL);
65304+#endif
65305+
65306+ } else { /* cases 3, 8 */
65307 err = vma_adjust(area, addr, next->vm_end,
65308 next->vm_pgoff - pglen, NULL);
65309+
65310+#ifdef CONFIG_PAX_SEGMEXEC
65311+ if (!err && area_m)
65312+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
65313+ next_m->vm_pgoff - pglen, NULL);
65314+#endif
65315+
65316+ }
65317 if (err)
65318 return NULL;
65319 khugepaged_enter_vma_merge(area);
65320@@ -929,14 +1009,11 @@ none:
65321 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
65322 struct file *file, long pages)
65323 {
65324- const unsigned long stack_flags
65325- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
65326-
65327 if (file) {
65328 mm->shared_vm += pages;
65329 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
65330 mm->exec_vm += pages;
65331- } else if (flags & stack_flags)
65332+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
65333 mm->stack_vm += pages;
65334 if (flags & (VM_RESERVED|VM_IO))
65335 mm->reserved_vm += pages;
65336@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
65337 * (the exception is when the underlying filesystem is noexec
65338 * mounted, in which case we dont add PROT_EXEC.)
65339 */
65340- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65341+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65342 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
65343 prot |= PROT_EXEC;
65344
65345@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
65346 /* Obtain the address to map to. we verify (or select) it and ensure
65347 * that it represents a valid section of the address space.
65348 */
65349- addr = get_unmapped_area(file, addr, len, pgoff, flags);
65350+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
65351 if (addr & ~PAGE_MASK)
65352 return addr;
65353
65354@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
65355 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
65356 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
65357
65358+#ifdef CONFIG_PAX_MPROTECT
65359+ if (mm->pax_flags & MF_PAX_MPROTECT) {
65360+#ifndef CONFIG_PAX_MPROTECT_COMPAT
65361+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
65362+ gr_log_rwxmmap(file);
65363+
65364+#ifdef CONFIG_PAX_EMUPLT
65365+ vm_flags &= ~VM_EXEC;
65366+#else
65367+ return -EPERM;
65368+#endif
65369+
65370+ }
65371+
65372+ if (!(vm_flags & VM_EXEC))
65373+ vm_flags &= ~VM_MAYEXEC;
65374+#else
65375+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65376+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65377+#endif
65378+ else
65379+ vm_flags &= ~VM_MAYWRITE;
65380+ }
65381+#endif
65382+
65383+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65384+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
65385+ vm_flags &= ~VM_PAGEEXEC;
65386+#endif
65387+
65388 if (flags & MAP_LOCKED)
65389 if (!can_do_mlock())
65390 return -EPERM;
65391@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
65392 locked += mm->locked_vm;
65393 lock_limit = rlimit(RLIMIT_MEMLOCK);
65394 lock_limit >>= PAGE_SHIFT;
65395+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65396 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
65397 return -EAGAIN;
65398 }
65399@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
65400 if (error)
65401 return error;
65402
65403+ if (!gr_acl_handle_mmap(file, prot))
65404+ return -EACCES;
65405+
65406 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
65407 }
65408 EXPORT_SYMBOL(do_mmap_pgoff);
65409@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
65410 vm_flags_t vm_flags = vma->vm_flags;
65411
65412 /* If it was private or non-writable, the write bit is already clear */
65413- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
65414+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
65415 return 0;
65416
65417 /* The backer wishes to know when pages are first written to? */
65418@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
65419 unsigned long charged = 0;
65420 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
65421
65422+#ifdef CONFIG_PAX_SEGMEXEC
65423+ struct vm_area_struct *vma_m = NULL;
65424+#endif
65425+
65426+ /*
65427+ * mm->mmap_sem is required to protect against another thread
65428+ * changing the mappings in case we sleep.
65429+ */
65430+ verify_mm_writelocked(mm);
65431+
65432 /* Clear old maps */
65433 error = -ENOMEM;
65434-munmap_back:
65435 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65436 if (vma && vma->vm_start < addr + len) {
65437 if (do_munmap(mm, addr, len))
65438 return -ENOMEM;
65439- goto munmap_back;
65440+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65441+ BUG_ON(vma && vma->vm_start < addr + len);
65442 }
65443
65444 /* Check against address space limit. */
65445@@ -1266,6 +1387,16 @@ munmap_back:
65446 goto unacct_error;
65447 }
65448
65449+#ifdef CONFIG_PAX_SEGMEXEC
65450+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
65451+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65452+ if (!vma_m) {
65453+ error = -ENOMEM;
65454+ goto free_vma;
65455+ }
65456+ }
65457+#endif
65458+
65459 vma->vm_mm = mm;
65460 vma->vm_start = addr;
65461 vma->vm_end = addr + len;
65462@@ -1289,6 +1420,19 @@ munmap_back:
65463 error = file->f_op->mmap(file, vma);
65464 if (error)
65465 goto unmap_and_free_vma;
65466+
65467+#ifdef CONFIG_PAX_SEGMEXEC
65468+ if (vma_m && (vm_flags & VM_EXECUTABLE))
65469+ added_exe_file_vma(mm);
65470+#endif
65471+
65472+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65473+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
65474+ vma->vm_flags |= VM_PAGEEXEC;
65475+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65476+ }
65477+#endif
65478+
65479 if (vm_flags & VM_EXECUTABLE)
65480 added_exe_file_vma(mm);
65481
65482@@ -1324,6 +1468,11 @@ munmap_back:
65483 vma_link(mm, vma, prev, rb_link, rb_parent);
65484 file = vma->vm_file;
65485
65486+#ifdef CONFIG_PAX_SEGMEXEC
65487+ if (vma_m)
65488+ BUG_ON(pax_mirror_vma(vma_m, vma));
65489+#endif
65490+
65491 /* Once vma denies write, undo our temporary denial count */
65492 if (correct_wcount)
65493 atomic_inc(&inode->i_writecount);
65494@@ -1332,6 +1481,7 @@ out:
65495
65496 mm->total_vm += len >> PAGE_SHIFT;
65497 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
65498+ track_exec_limit(mm, addr, addr + len, vm_flags);
65499 if (vm_flags & VM_LOCKED) {
65500 if (!mlock_vma_pages_range(vma, addr, addr + len))
65501 mm->locked_vm += (len >> PAGE_SHIFT);
65502@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
65503 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
65504 charged = 0;
65505 free_vma:
65506+
65507+#ifdef CONFIG_PAX_SEGMEXEC
65508+ if (vma_m)
65509+ kmem_cache_free(vm_area_cachep, vma_m);
65510+#endif
65511+
65512 kmem_cache_free(vm_area_cachep, vma);
65513 unacct_error:
65514 if (charged)
65515@@ -1356,6 +1512,44 @@ unacct_error:
65516 return error;
65517 }
65518
65519+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
65520+{
65521+ if (!vma) {
65522+#ifdef CONFIG_STACK_GROWSUP
65523+ if (addr > sysctl_heap_stack_gap)
65524+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
65525+ else
65526+ vma = find_vma(current->mm, 0);
65527+ if (vma && (vma->vm_flags & VM_GROWSUP))
65528+ return false;
65529+#endif
65530+ return true;
65531+ }
65532+
65533+ if (addr + len > vma->vm_start)
65534+ return false;
65535+
65536+ if (vma->vm_flags & VM_GROWSDOWN)
65537+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
65538+#ifdef CONFIG_STACK_GROWSUP
65539+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
65540+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
65541+#endif
65542+
65543+ return true;
65544+}
65545+
65546+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65547+{
65548+ if (vma->vm_start < len)
65549+ return -ENOMEM;
65550+ if (!(vma->vm_flags & VM_GROWSDOWN))
65551+ return vma->vm_start - len;
65552+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
65553+ return vma->vm_start - len - sysctl_heap_stack_gap;
65554+ return -ENOMEM;
65555+}
65556+
65557 /* Get an address range which is currently unmapped.
65558 * For shmat() with addr=0.
65559 *
65560@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
65561 if (flags & MAP_FIXED)
65562 return addr;
65563
65564+#ifdef CONFIG_PAX_RANDMMAP
65565+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65566+#endif
65567+
65568 if (addr) {
65569 addr = PAGE_ALIGN(addr);
65570- vma = find_vma(mm, addr);
65571- if (TASK_SIZE - len >= addr &&
65572- (!vma || addr + len <= vma->vm_start))
65573- return addr;
65574+ if (TASK_SIZE - len >= addr) {
65575+ vma = find_vma(mm, addr);
65576+ if (check_heap_stack_gap(vma, addr, len))
65577+ return addr;
65578+ }
65579 }
65580 if (len > mm->cached_hole_size) {
65581- start_addr = addr = mm->free_area_cache;
65582+ start_addr = addr = mm->free_area_cache;
65583 } else {
65584- start_addr = addr = TASK_UNMAPPED_BASE;
65585- mm->cached_hole_size = 0;
65586+ start_addr = addr = mm->mmap_base;
65587+ mm->cached_hole_size = 0;
65588 }
65589
65590 full_search:
65591@@ -1404,34 +1603,40 @@ full_search:
65592 * Start a new search - just in case we missed
65593 * some holes.
65594 */
65595- if (start_addr != TASK_UNMAPPED_BASE) {
65596- addr = TASK_UNMAPPED_BASE;
65597- start_addr = addr;
65598+ if (start_addr != mm->mmap_base) {
65599+ start_addr = addr = mm->mmap_base;
65600 mm->cached_hole_size = 0;
65601 goto full_search;
65602 }
65603 return -ENOMEM;
65604 }
65605- if (!vma || addr + len <= vma->vm_start) {
65606- /*
65607- * Remember the place where we stopped the search:
65608- */
65609- mm->free_area_cache = addr + len;
65610- return addr;
65611- }
65612+ if (check_heap_stack_gap(vma, addr, len))
65613+ break;
65614 if (addr + mm->cached_hole_size < vma->vm_start)
65615 mm->cached_hole_size = vma->vm_start - addr;
65616 addr = vma->vm_end;
65617 }
65618+
65619+ /*
65620+ * Remember the place where we stopped the search:
65621+ */
65622+ mm->free_area_cache = addr + len;
65623+ return addr;
65624 }
65625 #endif
65626
65627 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65628 {
65629+
65630+#ifdef CONFIG_PAX_SEGMEXEC
65631+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65632+ return;
65633+#endif
65634+
65635 /*
65636 * Is this a new hole at the lowest possible address?
65637 */
65638- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65639+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65640 mm->free_area_cache = addr;
65641 mm->cached_hole_size = ~0UL;
65642 }
65643@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
65644 {
65645 struct vm_area_struct *vma;
65646 struct mm_struct *mm = current->mm;
65647- unsigned long addr = addr0;
65648+ unsigned long base = mm->mmap_base, addr = addr0;
65649
65650 /* requested length too big for entire address space */
65651 if (len > TASK_SIZE)
65652@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
65653 if (flags & MAP_FIXED)
65654 return addr;
65655
65656+#ifdef CONFIG_PAX_RANDMMAP
65657+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65658+#endif
65659+
65660 /* requesting a specific address */
65661 if (addr) {
65662 addr = PAGE_ALIGN(addr);
65663- vma = find_vma(mm, addr);
65664- if (TASK_SIZE - len >= addr &&
65665- (!vma || addr + len <= vma->vm_start))
65666- return addr;
65667+ if (TASK_SIZE - len >= addr) {
65668+ vma = find_vma(mm, addr);
65669+ if (check_heap_stack_gap(vma, addr, len))
65670+ return addr;
65671+ }
65672 }
65673
65674 /* check if free_area_cache is useful for us */
65675@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
65676 /* make sure it can fit in the remaining address space */
65677 if (addr > len) {
65678 vma = find_vma(mm, addr-len);
65679- if (!vma || addr <= vma->vm_start)
65680+ if (check_heap_stack_gap(vma, addr - len, len))
65681 /* remember the address as a hint for next time */
65682 return (mm->free_area_cache = addr-len);
65683 }
65684@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
65685 * return with success:
65686 */
65687 vma = find_vma(mm, addr);
65688- if (!vma || addr+len <= vma->vm_start)
65689+ if (check_heap_stack_gap(vma, addr, len))
65690 /* remember the address as a hint for next time */
65691 return (mm->free_area_cache = addr);
65692
65693@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
65694 mm->cached_hole_size = vma->vm_start - addr;
65695
65696 /* try just below the current vma->vm_start */
65697- addr = vma->vm_start-len;
65698- } while (len < vma->vm_start);
65699+ addr = skip_heap_stack_gap(vma, len);
65700+ } while (!IS_ERR_VALUE(addr));
65701
65702 bottomup:
65703 /*
65704@@ -1515,13 +1725,21 @@ bottomup:
65705 * can happen with large stack limits and large mmap()
65706 * allocations.
65707 */
65708+ mm->mmap_base = TASK_UNMAPPED_BASE;
65709+
65710+#ifdef CONFIG_PAX_RANDMMAP
65711+ if (mm->pax_flags & MF_PAX_RANDMMAP)
65712+ mm->mmap_base += mm->delta_mmap;
65713+#endif
65714+
65715+ mm->free_area_cache = mm->mmap_base;
65716 mm->cached_hole_size = ~0UL;
65717- mm->free_area_cache = TASK_UNMAPPED_BASE;
65718 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65719 /*
65720 * Restore the topdown base:
65721 */
65722- mm->free_area_cache = mm->mmap_base;
65723+ mm->mmap_base = base;
65724+ mm->free_area_cache = base;
65725 mm->cached_hole_size = ~0UL;
65726
65727 return addr;
65728@@ -1530,6 +1748,12 @@ bottomup:
65729
65730 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65731 {
65732+
65733+#ifdef CONFIG_PAX_SEGMEXEC
65734+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65735+ return;
65736+#endif
65737+
65738 /*
65739 * Is this a new hole at the highest possible address?
65740 */
65741@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
65742 mm->free_area_cache = addr;
65743
65744 /* dont allow allocations above current base */
65745- if (mm->free_area_cache > mm->mmap_base)
65746+ if (mm->free_area_cache > mm->mmap_base) {
65747 mm->free_area_cache = mm->mmap_base;
65748+ mm->cached_hole_size = ~0UL;
65749+ }
65750 }
65751
65752 unsigned long
65753@@ -1646,6 +1872,28 @@ out:
65754 return prev ? prev->vm_next : vma;
65755 }
65756
65757+#ifdef CONFIG_PAX_SEGMEXEC
65758+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65759+{
65760+ struct vm_area_struct *vma_m;
65761+
65762+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65763+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65764+ BUG_ON(vma->vm_mirror);
65765+ return NULL;
65766+ }
65767+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65768+ vma_m = vma->vm_mirror;
65769+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65770+ BUG_ON(vma->vm_file != vma_m->vm_file);
65771+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65772+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
65773+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
65774+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65775+ return vma_m;
65776+}
65777+#endif
65778+
65779 /*
65780 * Verify that the stack growth is acceptable and
65781 * update accounting. This is shared with both the
65782@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
65783 return -ENOMEM;
65784
65785 /* Stack limit test */
65786+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
65787 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
65788 return -ENOMEM;
65789
65790@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
65791 locked = mm->locked_vm + grow;
65792 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
65793 limit >>= PAGE_SHIFT;
65794+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65795 if (locked > limit && !capable(CAP_IPC_LOCK))
65796 return -ENOMEM;
65797 }
65798@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
65799 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65800 * vma is the last one with address > vma->vm_end. Have to extend vma.
65801 */
65802+#ifndef CONFIG_IA64
65803+static
65804+#endif
65805 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65806 {
65807 int error;
65808+ bool locknext;
65809
65810 if (!(vma->vm_flags & VM_GROWSUP))
65811 return -EFAULT;
65812
65813+ /* Also guard against wrapping around to address 0. */
65814+ if (address < PAGE_ALIGN(address+1))
65815+ address = PAGE_ALIGN(address+1);
65816+ else
65817+ return -ENOMEM;
65818+
65819 /*
65820 * We must make sure the anon_vma is allocated
65821 * so that the anon_vma locking is not a noop.
65822 */
65823 if (unlikely(anon_vma_prepare(vma)))
65824 return -ENOMEM;
65825+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65826+ if (locknext && anon_vma_prepare(vma->vm_next))
65827+ return -ENOMEM;
65828 vma_lock_anon_vma(vma);
65829+ if (locknext)
65830+ vma_lock_anon_vma(vma->vm_next);
65831
65832 /*
65833 * vma->vm_start/vm_end cannot change under us because the caller
65834 * is required to hold the mmap_sem in read mode. We need the
65835- * anon_vma lock to serialize against concurrent expand_stacks.
65836- * Also guard against wrapping around to address 0.
65837+ * anon_vma locks to serialize against concurrent expand_stacks
65838+ * and expand_upwards.
65839 */
65840- if (address < PAGE_ALIGN(address+4))
65841- address = PAGE_ALIGN(address+4);
65842- else {
65843- vma_unlock_anon_vma(vma);
65844- return -ENOMEM;
65845- }
65846 error = 0;
65847
65848 /* Somebody else might have raced and expanded it already */
65849- if (address > vma->vm_end) {
65850+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65851+ error = -ENOMEM;
65852+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65853 unsigned long size, grow;
65854
65855 size = address - vma->vm_start;
65856@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
65857 }
65858 }
65859 }
65860+ if (locknext)
65861+ vma_unlock_anon_vma(vma->vm_next);
65862 vma_unlock_anon_vma(vma);
65863 khugepaged_enter_vma_merge(vma);
65864 return error;
65865@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
65866 unsigned long address)
65867 {
65868 int error;
65869+ bool lockprev = false;
65870+ struct vm_area_struct *prev;
65871
65872 /*
65873 * We must make sure the anon_vma is allocated
65874@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
65875 if (error)
65876 return error;
65877
65878+ prev = vma->vm_prev;
65879+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65880+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65881+#endif
65882+ if (lockprev && anon_vma_prepare(prev))
65883+ return -ENOMEM;
65884+ if (lockprev)
65885+ vma_lock_anon_vma(prev);
65886+
65887 vma_lock_anon_vma(vma);
65888
65889 /*
65890@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
65891 */
65892
65893 /* Somebody else might have raced and expanded it already */
65894- if (address < vma->vm_start) {
65895+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65896+ error = -ENOMEM;
65897+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65898 unsigned long size, grow;
65899
65900+#ifdef CONFIG_PAX_SEGMEXEC
65901+ struct vm_area_struct *vma_m;
65902+
65903+ vma_m = pax_find_mirror_vma(vma);
65904+#endif
65905+
65906 size = vma->vm_end - address;
65907 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65908
65909@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
65910 if (!error) {
65911 vma->vm_start = address;
65912 vma->vm_pgoff -= grow;
65913+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65914+
65915+#ifdef CONFIG_PAX_SEGMEXEC
65916+ if (vma_m) {
65917+ vma_m->vm_start -= grow << PAGE_SHIFT;
65918+ vma_m->vm_pgoff -= grow;
65919+ }
65920+#endif
65921+
65922 perf_event_mmap(vma);
65923 }
65924 }
65925 }
65926 vma_unlock_anon_vma(vma);
65927+ if (lockprev)
65928+ vma_unlock_anon_vma(prev);
65929 khugepaged_enter_vma_merge(vma);
65930 return error;
65931 }
65932@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
65933 do {
65934 long nrpages = vma_pages(vma);
65935
65936+#ifdef CONFIG_PAX_SEGMEXEC
65937+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65938+ vma = remove_vma(vma);
65939+ continue;
65940+ }
65941+#endif
65942+
65943 mm->total_vm -= nrpages;
65944 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65945 vma = remove_vma(vma);
65946@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65947 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65948 vma->vm_prev = NULL;
65949 do {
65950+
65951+#ifdef CONFIG_PAX_SEGMEXEC
65952+ if (vma->vm_mirror) {
65953+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65954+ vma->vm_mirror->vm_mirror = NULL;
65955+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
65956+ vma->vm_mirror = NULL;
65957+ }
65958+#endif
65959+
65960 rb_erase(&vma->vm_rb, &mm->mm_rb);
65961 mm->map_count--;
65962 tail_vma = vma;
65963@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
65964 struct vm_area_struct *new;
65965 int err = -ENOMEM;
65966
65967+#ifdef CONFIG_PAX_SEGMEXEC
65968+ struct vm_area_struct *vma_m, *new_m = NULL;
65969+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65970+#endif
65971+
65972 if (is_vm_hugetlb_page(vma) && (addr &
65973 ~(huge_page_mask(hstate_vma(vma)))))
65974 return -EINVAL;
65975
65976+#ifdef CONFIG_PAX_SEGMEXEC
65977+ vma_m = pax_find_mirror_vma(vma);
65978+#endif
65979+
65980 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65981 if (!new)
65982 goto out_err;
65983
65984+#ifdef CONFIG_PAX_SEGMEXEC
65985+ if (vma_m) {
65986+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65987+ if (!new_m) {
65988+ kmem_cache_free(vm_area_cachep, new);
65989+ goto out_err;
65990+ }
65991+ }
65992+#endif
65993+
65994 /* most fields are the same, copy all, and then fixup */
65995 *new = *vma;
65996
65997@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
65998 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65999 }
66000
66001+#ifdef CONFIG_PAX_SEGMEXEC
66002+ if (vma_m) {
66003+ *new_m = *vma_m;
66004+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
66005+ new_m->vm_mirror = new;
66006+ new->vm_mirror = new_m;
66007+
66008+ if (new_below)
66009+ new_m->vm_end = addr_m;
66010+ else {
66011+ new_m->vm_start = addr_m;
66012+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
66013+ }
66014+ }
66015+#endif
66016+
66017 pol = mpol_dup(vma_policy(vma));
66018 if (IS_ERR(pol)) {
66019 err = PTR_ERR(pol);
66020@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
66021 else
66022 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
66023
66024+#ifdef CONFIG_PAX_SEGMEXEC
66025+ if (!err && vma_m) {
66026+ if (anon_vma_clone(new_m, vma_m))
66027+ goto out_free_mpol;
66028+
66029+ mpol_get(pol);
66030+ vma_set_policy(new_m, pol);
66031+
66032+ if (new_m->vm_file) {
66033+ get_file(new_m->vm_file);
66034+ if (vma_m->vm_flags & VM_EXECUTABLE)
66035+ added_exe_file_vma(mm);
66036+ }
66037+
66038+ if (new_m->vm_ops && new_m->vm_ops->open)
66039+ new_m->vm_ops->open(new_m);
66040+
66041+ if (new_below)
66042+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
66043+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
66044+ else
66045+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
66046+
66047+ if (err) {
66048+ if (new_m->vm_ops && new_m->vm_ops->close)
66049+ new_m->vm_ops->close(new_m);
66050+ if (new_m->vm_file) {
66051+ if (vma_m->vm_flags & VM_EXECUTABLE)
66052+ removed_exe_file_vma(mm);
66053+ fput(new_m->vm_file);
66054+ }
66055+ mpol_put(pol);
66056+ }
66057+ }
66058+#endif
66059+
66060 /* Success. */
66061 if (!err)
66062 return 0;
66063@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
66064 removed_exe_file_vma(mm);
66065 fput(new->vm_file);
66066 }
66067- unlink_anon_vmas(new);
66068 out_free_mpol:
66069 mpol_put(pol);
66070 out_free_vma:
66071+
66072+#ifdef CONFIG_PAX_SEGMEXEC
66073+ if (new_m) {
66074+ unlink_anon_vmas(new_m);
66075+ kmem_cache_free(vm_area_cachep, new_m);
66076+ }
66077+#endif
66078+
66079+ unlink_anon_vmas(new);
66080 kmem_cache_free(vm_area_cachep, new);
66081 out_err:
66082 return err;
66083@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
66084 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
66085 unsigned long addr, int new_below)
66086 {
66087+
66088+#ifdef CONFIG_PAX_SEGMEXEC
66089+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
66090+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
66091+ if (mm->map_count >= sysctl_max_map_count-1)
66092+ return -ENOMEM;
66093+ } else
66094+#endif
66095+
66096 if (mm->map_count >= sysctl_max_map_count)
66097 return -ENOMEM;
66098
66099@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
66100 * work. This now handles partial unmappings.
66101 * Jeremy Fitzhardinge <jeremy@goop.org>
66102 */
66103+#ifdef CONFIG_PAX_SEGMEXEC
66104 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66105 {
66106+ int ret = __do_munmap(mm, start, len);
66107+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
66108+ return ret;
66109+
66110+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
66111+}
66112+
66113+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66114+#else
66115+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66116+#endif
66117+{
66118 unsigned long end;
66119 struct vm_area_struct *vma, *prev, *last;
66120
66121+ /*
66122+ * mm->mmap_sem is required to protect against another thread
66123+ * changing the mappings in case we sleep.
66124+ */
66125+ verify_mm_writelocked(mm);
66126+
66127 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
66128 return -EINVAL;
66129
66130@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
66131 /* Fix up all other VM information */
66132 remove_vma_list(mm, vma);
66133
66134+ track_exec_limit(mm, start, end, 0UL);
66135+
66136 return 0;
66137 }
66138
66139@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
66140
66141 profile_munmap(addr);
66142
66143+#ifdef CONFIG_PAX_SEGMEXEC
66144+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
66145+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
66146+ return -EINVAL;
66147+#endif
66148+
66149 down_write(&mm->mmap_sem);
66150 ret = do_munmap(mm, addr, len);
66151 up_write(&mm->mmap_sem);
66152 return ret;
66153 }
66154
66155-static inline void verify_mm_writelocked(struct mm_struct *mm)
66156-{
66157-#ifdef CONFIG_DEBUG_VM
66158- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
66159- WARN_ON(1);
66160- up_read(&mm->mmap_sem);
66161- }
66162-#endif
66163-}
66164-
66165 /*
66166 * this is really a simplified "do_mmap". it only handles
66167 * anonymous maps. eventually we may be able to do some
66168@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
66169 struct rb_node ** rb_link, * rb_parent;
66170 pgoff_t pgoff = addr >> PAGE_SHIFT;
66171 int error;
66172+ unsigned long charged;
66173
66174 len = PAGE_ALIGN(len);
66175 if (!len)
66176@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
66177
66178 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
66179
66180+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66181+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66182+ flags &= ~VM_EXEC;
66183+
66184+#ifdef CONFIG_PAX_MPROTECT
66185+ if (mm->pax_flags & MF_PAX_MPROTECT)
66186+ flags &= ~VM_MAYEXEC;
66187+#endif
66188+
66189+ }
66190+#endif
66191+
66192 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
66193 if (error & ~PAGE_MASK)
66194 return error;
66195
66196+ charged = len >> PAGE_SHIFT;
66197+
66198 /*
66199 * mlock MCL_FUTURE?
66200 */
66201 if (mm->def_flags & VM_LOCKED) {
66202 unsigned long locked, lock_limit;
66203- locked = len >> PAGE_SHIFT;
66204+ locked = charged;
66205 locked += mm->locked_vm;
66206 lock_limit = rlimit(RLIMIT_MEMLOCK);
66207 lock_limit >>= PAGE_SHIFT;
66208@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
66209 /*
66210 * Clear old maps. this also does some error checking for us
66211 */
66212- munmap_back:
66213 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66214 if (vma && vma->vm_start < addr + len) {
66215 if (do_munmap(mm, addr, len))
66216 return -ENOMEM;
66217- goto munmap_back;
66218+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66219+ BUG_ON(vma && vma->vm_start < addr + len);
66220 }
66221
66222 /* Check against address space limits *after* clearing old maps... */
66223- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
66224+ if (!may_expand_vm(mm, charged))
66225 return -ENOMEM;
66226
66227 if (mm->map_count > sysctl_max_map_count)
66228 return -ENOMEM;
66229
66230- if (security_vm_enough_memory(len >> PAGE_SHIFT))
66231+ if (security_vm_enough_memory(charged))
66232 return -ENOMEM;
66233
66234 /* Can we just expand an old private anonymous mapping? */
66235@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
66236 */
66237 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66238 if (!vma) {
66239- vm_unacct_memory(len >> PAGE_SHIFT);
66240+ vm_unacct_memory(charged);
66241 return -ENOMEM;
66242 }
66243
66244@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
66245 vma_link(mm, vma, prev, rb_link, rb_parent);
66246 out:
66247 perf_event_mmap(vma);
66248- mm->total_vm += len >> PAGE_SHIFT;
66249+ mm->total_vm += charged;
66250 if (flags & VM_LOCKED) {
66251 if (!mlock_vma_pages_range(vma, addr, addr + len))
66252- mm->locked_vm += (len >> PAGE_SHIFT);
66253+ mm->locked_vm += charged;
66254 }
66255+ track_exec_limit(mm, addr, addr + len, flags);
66256 return addr;
66257 }
66258
66259@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
66260 * Walk the list again, actually closing and freeing it,
66261 * with preemption enabled, without holding any MM locks.
66262 */
66263- while (vma)
66264+ while (vma) {
66265+ vma->vm_mirror = NULL;
66266 vma = remove_vma(vma);
66267+ }
66268
66269 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
66270 }
66271@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
66272 struct vm_area_struct * __vma, * prev;
66273 struct rb_node ** rb_link, * rb_parent;
66274
66275+#ifdef CONFIG_PAX_SEGMEXEC
66276+ struct vm_area_struct *vma_m = NULL;
66277+#endif
66278+
66279+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
66280+ return -EPERM;
66281+
66282 /*
66283 * The vm_pgoff of a purely anonymous vma should be irrelevant
66284 * until its first write fault, when page's anon_vma and index
66285@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
66286 if ((vma->vm_flags & VM_ACCOUNT) &&
66287 security_vm_enough_memory_mm(mm, vma_pages(vma)))
66288 return -ENOMEM;
66289+
66290+#ifdef CONFIG_PAX_SEGMEXEC
66291+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
66292+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66293+ if (!vma_m)
66294+ return -ENOMEM;
66295+ }
66296+#endif
66297+
66298 vma_link(mm, vma, prev, rb_link, rb_parent);
66299+
66300+#ifdef CONFIG_PAX_SEGMEXEC
66301+ if (vma_m)
66302+ BUG_ON(pax_mirror_vma(vma_m, vma));
66303+#endif
66304+
66305 return 0;
66306 }
66307
66308@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
66309 struct rb_node **rb_link, *rb_parent;
66310 struct mempolicy *pol;
66311
66312+ BUG_ON(vma->vm_mirror);
66313+
66314 /*
66315 * If anonymous vma has not yet been faulted, update new pgoff
66316 * to match new location, to increase its chance of merging.
66317@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
66318 return NULL;
66319 }
66320
66321+#ifdef CONFIG_PAX_SEGMEXEC
66322+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
66323+{
66324+ struct vm_area_struct *prev_m;
66325+ struct rb_node **rb_link_m, *rb_parent_m;
66326+ struct mempolicy *pol_m;
66327+
66328+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
66329+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
66330+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
66331+ *vma_m = *vma;
66332+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
66333+ if (anon_vma_clone(vma_m, vma))
66334+ return -ENOMEM;
66335+ pol_m = vma_policy(vma_m);
66336+ mpol_get(pol_m);
66337+ vma_set_policy(vma_m, pol_m);
66338+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
66339+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
66340+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
66341+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
66342+ if (vma_m->vm_file)
66343+ get_file(vma_m->vm_file);
66344+ if (vma_m->vm_ops && vma_m->vm_ops->open)
66345+ vma_m->vm_ops->open(vma_m);
66346+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
66347+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
66348+ vma_m->vm_mirror = vma;
66349+ vma->vm_mirror = vma_m;
66350+ return 0;
66351+}
66352+#endif
66353+
66354 /*
66355 * Return true if the calling process may expand its vm space by the passed
66356 * number of pages
66357@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
66358 unsigned long lim;
66359
66360 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
66361-
66362+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
66363 if (cur + npages > lim)
66364 return 0;
66365 return 1;
66366@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
66367 vma->vm_start = addr;
66368 vma->vm_end = addr + len;
66369
66370+#ifdef CONFIG_PAX_MPROTECT
66371+ if (mm->pax_flags & MF_PAX_MPROTECT) {
66372+#ifndef CONFIG_PAX_MPROTECT_COMPAT
66373+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
66374+ return -EPERM;
66375+ if (!(vm_flags & VM_EXEC))
66376+ vm_flags &= ~VM_MAYEXEC;
66377+#else
66378+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66379+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66380+#endif
66381+ else
66382+ vm_flags &= ~VM_MAYWRITE;
66383+ }
66384+#endif
66385+
66386 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
66387 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66388
66389diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
66390--- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
66391+++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
66392@@ -23,10 +23,16 @@
66393 #include <linux/mmu_notifier.h>
66394 #include <linux/migrate.h>
66395 #include <linux/perf_event.h>
66396+
66397+#ifdef CONFIG_PAX_MPROTECT
66398+#include <linux/elf.h>
66399+#endif
66400+
66401 #include <asm/uaccess.h>
66402 #include <asm/pgtable.h>
66403 #include <asm/cacheflush.h>
66404 #include <asm/tlbflush.h>
66405+#include <asm/mmu_context.h>
66406
66407 #ifndef pgprot_modify
66408 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
66409@@ -141,6 +147,48 @@ static void change_protection(struct vm_
66410 flush_tlb_range(vma, start, end);
66411 }
66412
66413+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66414+/* called while holding the mmap semaphor for writing except stack expansion */
66415+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
66416+{
66417+ unsigned long oldlimit, newlimit = 0UL;
66418+
66419+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
66420+ return;
66421+
66422+ spin_lock(&mm->page_table_lock);
66423+ oldlimit = mm->context.user_cs_limit;
66424+ if ((prot & VM_EXEC) && oldlimit < end)
66425+ /* USER_CS limit moved up */
66426+ newlimit = end;
66427+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
66428+ /* USER_CS limit moved down */
66429+ newlimit = start;
66430+
66431+ if (newlimit) {
66432+ mm->context.user_cs_limit = newlimit;
66433+
66434+#ifdef CONFIG_SMP
66435+ wmb();
66436+ cpus_clear(mm->context.cpu_user_cs_mask);
66437+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
66438+#endif
66439+
66440+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
66441+ }
66442+ spin_unlock(&mm->page_table_lock);
66443+ if (newlimit == end) {
66444+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
66445+
66446+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
66447+ if (is_vm_hugetlb_page(vma))
66448+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
66449+ else
66450+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
66451+ }
66452+}
66453+#endif
66454+
66455 int
66456 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
66457 unsigned long start, unsigned long end, unsigned long newflags)
66458@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
66459 int error;
66460 int dirty_accountable = 0;
66461
66462+#ifdef CONFIG_PAX_SEGMEXEC
66463+ struct vm_area_struct *vma_m = NULL;
66464+ unsigned long start_m, end_m;
66465+
66466+ start_m = start + SEGMEXEC_TASK_SIZE;
66467+ end_m = end + SEGMEXEC_TASK_SIZE;
66468+#endif
66469+
66470 if (newflags == oldflags) {
66471 *pprev = vma;
66472 return 0;
66473 }
66474
66475+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
66476+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
66477+
66478+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
66479+ return -ENOMEM;
66480+
66481+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
66482+ return -ENOMEM;
66483+ }
66484+
66485 /*
66486 * If we make a private mapping writable we increase our commit;
66487 * but (without finer accounting) cannot reduce our commit if we
66488@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
66489 }
66490 }
66491
66492+#ifdef CONFIG_PAX_SEGMEXEC
66493+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
66494+ if (start != vma->vm_start) {
66495+ error = split_vma(mm, vma, start, 1);
66496+ if (error)
66497+ goto fail;
66498+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
66499+ *pprev = (*pprev)->vm_next;
66500+ }
66501+
66502+ if (end != vma->vm_end) {
66503+ error = split_vma(mm, vma, end, 0);
66504+ if (error)
66505+ goto fail;
66506+ }
66507+
66508+ if (pax_find_mirror_vma(vma)) {
66509+ error = __do_munmap(mm, start_m, end_m - start_m);
66510+ if (error)
66511+ goto fail;
66512+ } else {
66513+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66514+ if (!vma_m) {
66515+ error = -ENOMEM;
66516+ goto fail;
66517+ }
66518+ vma->vm_flags = newflags;
66519+ error = pax_mirror_vma(vma_m, vma);
66520+ if (error) {
66521+ vma->vm_flags = oldflags;
66522+ goto fail;
66523+ }
66524+ }
66525+ }
66526+#endif
66527+
66528 /*
66529 * First try to merge with previous and/or next vma.
66530 */
66531@@ -204,9 +306,21 @@ success:
66532 * vm_flags and vm_page_prot are protected by the mmap_sem
66533 * held in write mode.
66534 */
66535+
66536+#ifdef CONFIG_PAX_SEGMEXEC
66537+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
66538+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
66539+#endif
66540+
66541 vma->vm_flags = newflags;
66542+
66543+#ifdef CONFIG_PAX_MPROTECT
66544+ if (mm->binfmt && mm->binfmt->handle_mprotect)
66545+ mm->binfmt->handle_mprotect(vma, newflags);
66546+#endif
66547+
66548 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66549- vm_get_page_prot(newflags));
66550+ vm_get_page_prot(vma->vm_flags));
66551
66552 if (vma_wants_writenotify(vma)) {
66553 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66554@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66555 end = start + len;
66556 if (end <= start)
66557 return -ENOMEM;
66558+
66559+#ifdef CONFIG_PAX_SEGMEXEC
66560+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66561+ if (end > SEGMEXEC_TASK_SIZE)
66562+ return -EINVAL;
66563+ } else
66564+#endif
66565+
66566+ if (end > TASK_SIZE)
66567+ return -EINVAL;
66568+
66569 if (!arch_validate_prot(prot))
66570 return -EINVAL;
66571
66572@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66573 /*
66574 * Does the application expect PROT_READ to imply PROT_EXEC:
66575 */
66576- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66577+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66578 prot |= PROT_EXEC;
66579
66580 vm_flags = calc_vm_prot_bits(prot);
66581@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66582 if (start > vma->vm_start)
66583 prev = vma;
66584
66585+#ifdef CONFIG_PAX_MPROTECT
66586+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66587+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
66588+#endif
66589+
66590 for (nstart = start ; ; ) {
66591 unsigned long newflags;
66592
66593@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66594
66595 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66596 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66597+ if (prot & (PROT_WRITE | PROT_EXEC))
66598+ gr_log_rwxmprotect(vma->vm_file);
66599+
66600+ error = -EACCES;
66601+ goto out;
66602+ }
66603+
66604+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66605 error = -EACCES;
66606 goto out;
66607 }
66608@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66609 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66610 if (error)
66611 goto out;
66612+
66613+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
66614+
66615 nstart = tmp;
66616
66617 if (nstart < prev->vm_end)
66618diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
66619--- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
66620+++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
66621@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
66622 continue;
66623 pte = ptep_clear_flush(vma, old_addr, old_pte);
66624 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66625+
66626+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66627+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66628+ pte = pte_exprotect(pte);
66629+#endif
66630+
66631 set_pte_at(mm, new_addr, new_pte, pte);
66632 }
66633
66634@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
66635 if (is_vm_hugetlb_page(vma))
66636 goto Einval;
66637
66638+#ifdef CONFIG_PAX_SEGMEXEC
66639+ if (pax_find_mirror_vma(vma))
66640+ goto Einval;
66641+#endif
66642+
66643 /* We can't remap across vm area boundaries */
66644 if (old_len > vma->vm_end - addr)
66645 goto Efault;
66646@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
66647 unsigned long ret = -EINVAL;
66648 unsigned long charged = 0;
66649 unsigned long map_flags;
66650+ unsigned long pax_task_size = TASK_SIZE;
66651
66652 if (new_addr & ~PAGE_MASK)
66653 goto out;
66654
66655- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66656+#ifdef CONFIG_PAX_SEGMEXEC
66657+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66658+ pax_task_size = SEGMEXEC_TASK_SIZE;
66659+#endif
66660+
66661+ pax_task_size -= PAGE_SIZE;
66662+
66663+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66664 goto out;
66665
66666 /* Check if the location we're moving into overlaps the
66667 * old location at all, and fail if it does.
66668 */
66669- if ((new_addr <= addr) && (new_addr+new_len) > addr)
66670- goto out;
66671-
66672- if ((addr <= new_addr) && (addr+old_len) > new_addr)
66673+ if (addr + old_len > new_addr && new_addr + new_len > addr)
66674 goto out;
66675
66676 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66677@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
66678 struct vm_area_struct *vma;
66679 unsigned long ret = -EINVAL;
66680 unsigned long charged = 0;
66681+ unsigned long pax_task_size = TASK_SIZE;
66682
66683 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66684 goto out;
66685@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
66686 if (!new_len)
66687 goto out;
66688
66689+#ifdef CONFIG_PAX_SEGMEXEC
66690+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66691+ pax_task_size = SEGMEXEC_TASK_SIZE;
66692+#endif
66693+
66694+ pax_task_size -= PAGE_SIZE;
66695+
66696+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66697+ old_len > pax_task_size || addr > pax_task_size-old_len)
66698+ goto out;
66699+
66700 if (flags & MREMAP_FIXED) {
66701 if (flags & MREMAP_MAYMOVE)
66702 ret = mremap_to(addr, old_len, new_addr, new_len);
66703@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
66704 addr + new_len);
66705 }
66706 ret = addr;
66707+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66708 goto out;
66709 }
66710 }
66711@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
66712 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66713 if (ret)
66714 goto out;
66715+
66716+ map_flags = vma->vm_flags;
66717 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66718+ if (!(ret & ~PAGE_MASK)) {
66719+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66720+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66721+ }
66722 }
66723 out:
66724 if (ret & ~PAGE_MASK)
66725diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
66726--- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
66727+++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
66728@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
66729 unsigned long __init free_all_memory_core_early(int nodeid)
66730 {
66731 int i;
66732- u64 start, end;
66733+ u64 start, end, startrange, endrange;
66734 unsigned long count = 0;
66735- struct range *range = NULL;
66736+ struct range *range = NULL, rangerange = { 0, 0 };
66737 int nr_range;
66738
66739 nr_range = get_free_all_memory_range(&range, nodeid);
66740+ startrange = __pa(range) >> PAGE_SHIFT;
66741+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
66742
66743 for (i = 0; i < nr_range; i++) {
66744 start = range[i].start;
66745 end = range[i].end;
66746+ if (start <= endrange && startrange < end) {
66747+ BUG_ON(rangerange.start | rangerange.end);
66748+ rangerange = range[i];
66749+ continue;
66750+ }
66751 count += end - start;
66752 __free_pages_memory(start, end);
66753 }
66754+ start = rangerange.start;
66755+ end = rangerange.end;
66756+ count += end - start;
66757+ __free_pages_memory(start, end);
66758
66759 return count;
66760 }
66761diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
66762--- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
66763+++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
66764@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66765 int sysctl_overcommit_ratio = 50; /* default is 50% */
66766 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66767 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66768-int heap_stack_gap = 0;
66769
66770 atomic_long_t mmap_pages_allocated;
66771
66772@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
66773 EXPORT_SYMBOL(find_vma);
66774
66775 /*
66776- * find a VMA
66777- * - we don't extend stack VMAs under NOMMU conditions
66778- */
66779-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66780-{
66781- return find_vma(mm, addr);
66782-}
66783-
66784-/*
66785 * expand a stack to a given address
66786 * - not supported under NOMMU conditions
66787 */
66788@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
66789
66790 /* most fields are the same, copy all, and then fixup */
66791 *new = *vma;
66792+ INIT_LIST_HEAD(&new->anon_vma_chain);
66793 *region = *vma->vm_region;
66794 new->vm_region = region;
66795
66796diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
66797--- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
66798+++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
66799@@ -340,7 +340,7 @@ out:
66800 * This usage means that zero-order pages may not be compound.
66801 */
66802
66803-static void free_compound_page(struct page *page)
66804+void free_compound_page(struct page *page)
66805 {
66806 __free_pages_ok(page, compound_order(page));
66807 }
66808@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
66809 int i;
66810 int bad = 0;
66811
66812+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66813+ unsigned long index = 1UL << order;
66814+#endif
66815+
66816 trace_mm_page_free_direct(page, order);
66817 kmemcheck_free_shadow(page, order);
66818
66819@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
66820 debug_check_no_obj_freed(page_address(page),
66821 PAGE_SIZE << order);
66822 }
66823+
66824+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66825+ for (; index; --index)
66826+ sanitize_highpage(page + index - 1);
66827+#endif
66828+
66829 arch_free_page(page, order);
66830 kernel_map_pages(page, 1 << order, 0);
66831
66832@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
66833 arch_alloc_page(page, order);
66834 kernel_map_pages(page, 1 << order, 1);
66835
66836+#ifndef CONFIG_PAX_MEMORY_SANITIZE
66837 if (gfp_flags & __GFP_ZERO)
66838 prep_zero_page(page, order, gfp_flags);
66839+#endif
66840
66841 if (order && (gfp_flags & __GFP_COMP))
66842 prep_compound_page(page, order);
66843@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
66844 int cpu;
66845 struct zone *zone;
66846
66847+ pax_track_stack();
66848+
66849 for_each_populated_zone(zone) {
66850 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66851 continue;
66852diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
66853--- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
66854+++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
66855@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
66856 static unsigned int pcpu_last_unit_cpu __read_mostly;
66857
66858 /* the address of the first chunk which starts with the kernel static area */
66859-void *pcpu_base_addr __read_mostly;
66860+void *pcpu_base_addr __read_only;
66861 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66862
66863 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66864diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
66865--- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
66866+++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
66867@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
66868 struct anon_vma *anon_vma = vma->anon_vma;
66869 struct anon_vma_chain *avc;
66870
66871+#ifdef CONFIG_PAX_SEGMEXEC
66872+ struct anon_vma_chain *avc_m = NULL;
66873+#endif
66874+
66875 might_sleep();
66876 if (unlikely(!anon_vma)) {
66877 struct mm_struct *mm = vma->vm_mm;
66878@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
66879 if (!avc)
66880 goto out_enomem;
66881
66882+#ifdef CONFIG_PAX_SEGMEXEC
66883+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
66884+ if (!avc_m)
66885+ goto out_enomem_free_avc;
66886+#endif
66887+
66888 anon_vma = find_mergeable_anon_vma(vma);
66889 allocated = NULL;
66890 if (!anon_vma) {
66891@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
66892 /* page_table_lock to protect against threads */
66893 spin_lock(&mm->page_table_lock);
66894 if (likely(!vma->anon_vma)) {
66895+
66896+#ifdef CONFIG_PAX_SEGMEXEC
66897+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66898+
66899+ if (vma_m) {
66900+ BUG_ON(vma_m->anon_vma);
66901+ vma_m->anon_vma = anon_vma;
66902+ avc_m->anon_vma = anon_vma;
66903+ avc_m->vma = vma;
66904+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
66905+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
66906+ avc_m = NULL;
66907+ }
66908+#endif
66909+
66910 vma->anon_vma = anon_vma;
66911 avc->anon_vma = anon_vma;
66912 avc->vma = vma;
66913@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
66914
66915 if (unlikely(allocated))
66916 put_anon_vma(allocated);
66917+
66918+#ifdef CONFIG_PAX_SEGMEXEC
66919+ if (unlikely(avc_m))
66920+ anon_vma_chain_free(avc_m);
66921+#endif
66922+
66923 if (unlikely(avc))
66924 anon_vma_chain_free(avc);
66925 }
66926 return 0;
66927
66928 out_enomem_free_avc:
66929+
66930+#ifdef CONFIG_PAX_SEGMEXEC
66931+ if (avc_m)
66932+ anon_vma_chain_free(avc_m);
66933+#endif
66934+
66935 anon_vma_chain_free(avc);
66936 out_enomem:
66937 return -ENOMEM;
66938@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
66939 * Attach the anon_vmas from src to dst.
66940 * Returns 0 on success, -ENOMEM on failure.
66941 */
66942-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
66943+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
66944 {
66945 struct anon_vma_chain *avc, *pavc;
66946 struct anon_vma *root = NULL;
66947@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
66948 * the corresponding VMA in the parent process is attached to.
66949 * Returns 0 on success, non-zero on failure.
66950 */
66951-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
66952+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
66953 {
66954 struct anon_vma_chain *avc;
66955 struct anon_vma *anon_vma;
66956diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
66957--- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
66958+++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
66959@@ -31,7 +31,7 @@
66960 #include <linux/percpu_counter.h>
66961 #include <linux/swap.h>
66962
66963-static struct vfsmount *shm_mnt;
66964+struct vfsmount *shm_mnt;
66965
66966 #ifdef CONFIG_SHMEM
66967 /*
66968@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
66969 goto unlock;
66970 }
66971 entry = shmem_swp_entry(info, index, NULL);
66972+ if (!entry)
66973+ goto unlock;
66974 if (entry->val) {
66975 /*
66976 * The more uptodate page coming down from a stacked
66977@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
66978 struct vm_area_struct pvma;
66979 struct page *page;
66980
66981+ pax_track_stack();
66982+
66983 spol = mpol_cond_copy(&mpol,
66984 mpol_shared_policy_lookup(&info->policy, idx));
66985
66986@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
66987 int err = -ENOMEM;
66988
66989 /* Round up to L1_CACHE_BYTES to resist false sharing */
66990- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66991- L1_CACHE_BYTES), GFP_KERNEL);
66992+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66993 if (!sbinfo)
66994 return -ENOMEM;
66995
66996diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
66997--- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
66998+++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
66999@@ -151,7 +151,7 @@
67000
67001 /* Legal flag mask for kmem_cache_create(). */
67002 #if DEBUG
67003-# define CREATE_MASK (SLAB_RED_ZONE | \
67004+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
67005 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
67006 SLAB_CACHE_DMA | \
67007 SLAB_STORE_USER | \
67008@@ -159,7 +159,7 @@
67009 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
67010 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
67011 #else
67012-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
67013+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
67014 SLAB_CACHE_DMA | \
67015 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
67016 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
67017@@ -288,7 +288,7 @@ struct kmem_list3 {
67018 * Need this for bootstrapping a per node allocator.
67019 */
67020 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
67021-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
67022+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
67023 #define CACHE_CACHE 0
67024 #define SIZE_AC MAX_NUMNODES
67025 #define SIZE_L3 (2 * MAX_NUMNODES)
67026@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
67027 if ((x)->max_freeable < i) \
67028 (x)->max_freeable = i; \
67029 } while (0)
67030-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
67031-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
67032-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
67033-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
67034+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
67035+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
67036+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
67037+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
67038 #else
67039 #define STATS_INC_ACTIVE(x) do { } while (0)
67040 #define STATS_DEC_ACTIVE(x) do { } while (0)
67041@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
67042 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
67043 */
67044 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
67045- const struct slab *slab, void *obj)
67046+ const struct slab *slab, const void *obj)
67047 {
67048 u32 offset = (obj - slab->s_mem);
67049 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
67050@@ -564,7 +564,7 @@ struct cache_names {
67051 static struct cache_names __initdata cache_names[] = {
67052 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
67053 #include <linux/kmalloc_sizes.h>
67054- {NULL,}
67055+ {NULL}
67056 #undef CACHE
67057 };
67058
67059@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
67060 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
67061 sizes[INDEX_AC].cs_size,
67062 ARCH_KMALLOC_MINALIGN,
67063- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
67064+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
67065 NULL);
67066
67067 if (INDEX_AC != INDEX_L3) {
67068@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
67069 kmem_cache_create(names[INDEX_L3].name,
67070 sizes[INDEX_L3].cs_size,
67071 ARCH_KMALLOC_MINALIGN,
67072- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
67073+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
67074 NULL);
67075 }
67076
67077@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
67078 sizes->cs_cachep = kmem_cache_create(names->name,
67079 sizes->cs_size,
67080 ARCH_KMALLOC_MINALIGN,
67081- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
67082+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
67083 NULL);
67084 }
67085 #ifdef CONFIG_ZONE_DMA
67086@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
67087 }
67088 /* cpu stats */
67089 {
67090- unsigned long allochit = atomic_read(&cachep->allochit);
67091- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
67092- unsigned long freehit = atomic_read(&cachep->freehit);
67093- unsigned long freemiss = atomic_read(&cachep->freemiss);
67094+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
67095+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
67096+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
67097+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
67098
67099 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
67100 allochit, allocmiss, freehit, freemiss);
67101@@ -4532,15 +4532,66 @@ static const struct file_operations proc
67102
67103 static int __init slab_proc_init(void)
67104 {
67105- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
67106+ mode_t gr_mode = S_IRUGO;
67107+
67108+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67109+ gr_mode = S_IRUSR;
67110+#endif
67111+
67112+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
67113 #ifdef CONFIG_DEBUG_SLAB_LEAK
67114- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
67115+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
67116 #endif
67117 return 0;
67118 }
67119 module_init(slab_proc_init);
67120 #endif
67121
67122+void check_object_size(const void *ptr, unsigned long n, bool to)
67123+{
67124+
67125+#ifdef CONFIG_PAX_USERCOPY
67126+ struct page *page;
67127+ struct kmem_cache *cachep = NULL;
67128+ struct slab *slabp;
67129+ unsigned int objnr;
67130+ unsigned long offset;
67131+
67132+ if (!n)
67133+ return;
67134+
67135+ if (ZERO_OR_NULL_PTR(ptr))
67136+ goto report;
67137+
67138+ if (!virt_addr_valid(ptr))
67139+ return;
67140+
67141+ page = virt_to_head_page(ptr);
67142+
67143+ if (!PageSlab(page)) {
67144+ if (object_is_on_stack(ptr, n) == -1)
67145+ goto report;
67146+ return;
67147+ }
67148+
67149+ cachep = page_get_cache(page);
67150+ if (!(cachep->flags & SLAB_USERCOPY))
67151+ goto report;
67152+
67153+ slabp = page_get_slab(page);
67154+ objnr = obj_to_index(cachep, slabp, ptr);
67155+ BUG_ON(objnr >= cachep->num);
67156+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
67157+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
67158+ return;
67159+
67160+report:
67161+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
67162+#endif
67163+
67164+}
67165+EXPORT_SYMBOL(check_object_size);
67166+
67167 /**
67168 * ksize - get the actual amount of memory allocated for a given object
67169 * @objp: Pointer to the object
67170diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
67171--- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
67172+++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
67173@@ -29,7 +29,7 @@
67174 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
67175 * alloc_pages() directly, allocating compound pages so the page order
67176 * does not have to be separately tracked, and also stores the exact
67177- * allocation size in page->private so that it can be used to accurately
67178+ * allocation size in slob_page->size so that it can be used to accurately
67179 * provide ksize(). These objects are detected in kfree() because slob_page()
67180 * is false for them.
67181 *
67182@@ -58,6 +58,7 @@
67183 */
67184
67185 #include <linux/kernel.h>
67186+#include <linux/sched.h>
67187 #include <linux/slab.h>
67188 #include <linux/mm.h>
67189 #include <linux/swap.h> /* struct reclaim_state */
67190@@ -102,7 +103,8 @@ struct slob_page {
67191 unsigned long flags; /* mandatory */
67192 atomic_t _count; /* mandatory */
67193 slobidx_t units; /* free units left in page */
67194- unsigned long pad[2];
67195+ unsigned long pad[1];
67196+ unsigned long size; /* size when >=PAGE_SIZE */
67197 slob_t *free; /* first free slob_t in page */
67198 struct list_head list; /* linked list of free pages */
67199 };
67200@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
67201 */
67202 static inline int is_slob_page(struct slob_page *sp)
67203 {
67204- return PageSlab((struct page *)sp);
67205+ return PageSlab((struct page *)sp) && !sp->size;
67206 }
67207
67208 static inline void set_slob_page(struct slob_page *sp)
67209@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
67210
67211 static inline struct slob_page *slob_page(const void *addr)
67212 {
67213- return (struct slob_page *)virt_to_page(addr);
67214+ return (struct slob_page *)virt_to_head_page(addr);
67215 }
67216
67217 /*
67218@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
67219 /*
67220 * Return the size of a slob block.
67221 */
67222-static slobidx_t slob_units(slob_t *s)
67223+static slobidx_t slob_units(const slob_t *s)
67224 {
67225 if (s->units > 0)
67226 return s->units;
67227@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
67228 /*
67229 * Return the next free slob block pointer after this one.
67230 */
67231-static slob_t *slob_next(slob_t *s)
67232+static slob_t *slob_next(const slob_t *s)
67233 {
67234 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
67235 slobidx_t next;
67236@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
67237 /*
67238 * Returns true if s is the last free block in its page.
67239 */
67240-static int slob_last(slob_t *s)
67241+static int slob_last(const slob_t *s)
67242 {
67243 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
67244 }
67245@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
67246 if (!page)
67247 return NULL;
67248
67249+ set_slob_page(page);
67250 return page_address(page);
67251 }
67252
67253@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
67254 if (!b)
67255 return NULL;
67256 sp = slob_page(b);
67257- set_slob_page(sp);
67258
67259 spin_lock_irqsave(&slob_lock, flags);
67260 sp->units = SLOB_UNITS(PAGE_SIZE);
67261 sp->free = b;
67262+ sp->size = 0;
67263 INIT_LIST_HEAD(&sp->list);
67264 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
67265 set_slob_page_free(sp, slob_list);
67266@@ -476,10 +479,9 @@ out:
67267 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
67268 */
67269
67270-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67271+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
67272 {
67273- unsigned int *m;
67274- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67275+ slob_t *m;
67276 void *ret;
67277
67278 lockdep_trace_alloc(gfp);
67279@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
67280
67281 if (!m)
67282 return NULL;
67283- *m = size;
67284+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
67285+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
67286+ m[0].units = size;
67287+ m[1].units = align;
67288 ret = (void *)m + align;
67289
67290 trace_kmalloc_node(_RET_IP_, ret,
67291@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
67292 gfp |= __GFP_COMP;
67293 ret = slob_new_pages(gfp, order, node);
67294 if (ret) {
67295- struct page *page;
67296- page = virt_to_page(ret);
67297- page->private = size;
67298+ struct slob_page *sp;
67299+ sp = slob_page(ret);
67300+ sp->size = size;
67301 }
67302
67303 trace_kmalloc_node(_RET_IP_, ret,
67304 size, PAGE_SIZE << order, gfp, node);
67305 }
67306
67307- kmemleak_alloc(ret, size, 1, gfp);
67308+ return ret;
67309+}
67310+
67311+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67312+{
67313+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67314+ void *ret = __kmalloc_node_align(size, gfp, node, align);
67315+
67316+ if (!ZERO_OR_NULL_PTR(ret))
67317+ kmemleak_alloc(ret, size, 1, gfp);
67318 return ret;
67319 }
67320 EXPORT_SYMBOL(__kmalloc_node);
67321@@ -531,13 +545,88 @@ void kfree(const void *block)
67322 sp = slob_page(block);
67323 if (is_slob_page(sp)) {
67324 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67325- unsigned int *m = (unsigned int *)(block - align);
67326- slob_free(m, *m + align);
67327- } else
67328+ slob_t *m = (slob_t *)(block - align);
67329+ slob_free(m, m[0].units + align);
67330+ } else {
67331+ clear_slob_page(sp);
67332+ free_slob_page(sp);
67333+ sp->size = 0;
67334 put_page(&sp->page);
67335+ }
67336 }
67337 EXPORT_SYMBOL(kfree);
67338
67339+void check_object_size(const void *ptr, unsigned long n, bool to)
67340+{
67341+
67342+#ifdef CONFIG_PAX_USERCOPY
67343+ struct slob_page *sp;
67344+ const slob_t *free;
67345+ const void *base;
67346+ unsigned long flags;
67347+
67348+ if (!n)
67349+ return;
67350+
67351+ if (ZERO_OR_NULL_PTR(ptr))
67352+ goto report;
67353+
67354+ if (!virt_addr_valid(ptr))
67355+ return;
67356+
67357+ sp = slob_page(ptr);
67358+ if (!PageSlab((struct page*)sp)) {
67359+ if (object_is_on_stack(ptr, n) == -1)
67360+ goto report;
67361+ return;
67362+ }
67363+
67364+ if (sp->size) {
67365+ base = page_address(&sp->page);
67366+ if (base <= ptr && n <= sp->size - (ptr - base))
67367+ return;
67368+ goto report;
67369+ }
67370+
67371+ /* some tricky double walking to find the chunk */
67372+ spin_lock_irqsave(&slob_lock, flags);
67373+ base = (void *)((unsigned long)ptr & PAGE_MASK);
67374+ free = sp->free;
67375+
67376+ while (!slob_last(free) && (void *)free <= ptr) {
67377+ base = free + slob_units(free);
67378+ free = slob_next(free);
67379+ }
67380+
67381+ while (base < (void *)free) {
67382+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
67383+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
67384+ int offset;
67385+
67386+ if (ptr < base + align)
67387+ break;
67388+
67389+ offset = ptr - base - align;
67390+ if (offset >= m) {
67391+ base += size;
67392+ continue;
67393+ }
67394+
67395+ if (n > m - offset)
67396+ break;
67397+
67398+ spin_unlock_irqrestore(&slob_lock, flags);
67399+ return;
67400+ }
67401+
67402+ spin_unlock_irqrestore(&slob_lock, flags);
67403+report:
67404+ pax_report_usercopy(ptr, n, to, NULL);
67405+#endif
67406+
67407+}
67408+EXPORT_SYMBOL(check_object_size);
67409+
67410 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
67411 size_t ksize(const void *block)
67412 {
67413@@ -550,10 +639,10 @@ size_t ksize(const void *block)
67414 sp = slob_page(block);
67415 if (is_slob_page(sp)) {
67416 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67417- unsigned int *m = (unsigned int *)(block - align);
67418- return SLOB_UNITS(*m) * SLOB_UNIT;
67419+ slob_t *m = (slob_t *)(block - align);
67420+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
67421 } else
67422- return sp->page.private;
67423+ return sp->size;
67424 }
67425 EXPORT_SYMBOL(ksize);
67426
67427@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
67428 {
67429 struct kmem_cache *c;
67430
67431+#ifdef CONFIG_PAX_USERCOPY
67432+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
67433+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
67434+#else
67435 c = slob_alloc(sizeof(struct kmem_cache),
67436 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
67437+#endif
67438
67439 if (c) {
67440 c->name = name;
67441@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
67442 {
67443 void *b;
67444
67445+#ifdef CONFIG_PAX_USERCOPY
67446+ b = __kmalloc_node_align(c->size, flags, node, c->align);
67447+#else
67448 if (c->size < PAGE_SIZE) {
67449 b = slob_alloc(c->size, flags, c->align, node);
67450 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67451 SLOB_UNITS(c->size) * SLOB_UNIT,
67452 flags, node);
67453 } else {
67454+ struct slob_page *sp;
67455+
67456 b = slob_new_pages(flags, get_order(c->size), node);
67457+ sp = slob_page(b);
67458+ sp->size = c->size;
67459 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67460 PAGE_SIZE << get_order(c->size),
67461 flags, node);
67462 }
67463+#endif
67464
67465 if (c->ctor)
67466 c->ctor(b);
67467@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
67468
67469 static void __kmem_cache_free(void *b, int size)
67470 {
67471- if (size < PAGE_SIZE)
67472+ struct slob_page *sp = slob_page(b);
67473+
67474+ if (is_slob_page(sp))
67475 slob_free(b, size);
67476- else
67477+ else {
67478+ clear_slob_page(sp);
67479+ free_slob_page(sp);
67480+ sp->size = 0;
67481 slob_free_pages(b, get_order(size));
67482+ }
67483 }
67484
67485 static void kmem_rcu_free(struct rcu_head *head)
67486@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
67487
67488 void kmem_cache_free(struct kmem_cache *c, void *b)
67489 {
67490+ int size = c->size;
67491+
67492+#ifdef CONFIG_PAX_USERCOPY
67493+ if (size + c->align < PAGE_SIZE) {
67494+ size += c->align;
67495+ b -= c->align;
67496+ }
67497+#endif
67498+
67499 kmemleak_free_recursive(b, c->flags);
67500 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
67501 struct slob_rcu *slob_rcu;
67502- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
67503- slob_rcu->size = c->size;
67504+ slob_rcu = b + (size - sizeof(struct slob_rcu));
67505+ slob_rcu->size = size;
67506 call_rcu(&slob_rcu->head, kmem_rcu_free);
67507 } else {
67508- __kmem_cache_free(b, c->size);
67509+ __kmem_cache_free(b, size);
67510 }
67511
67512+#ifdef CONFIG_PAX_USERCOPY
67513+ trace_kfree(_RET_IP_, b);
67514+#else
67515 trace_kmem_cache_free(_RET_IP_, b);
67516+#endif
67517+
67518 }
67519 EXPORT_SYMBOL(kmem_cache_free);
67520
67521diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
67522--- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
67523+++ linux-3.0.4/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
67524@@ -442,7 +442,7 @@ static void print_track(const char *s, s
67525 if (!t->addr)
67526 return;
67527
67528- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
67529+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
67530 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
67531 }
67532
67533@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
67534
67535 page = virt_to_head_page(x);
67536
67537+ BUG_ON(!PageSlab(page));
67538+
67539 slab_free(s, page, x, _RET_IP_);
67540
67541 trace_kmem_cache_free(_RET_IP_, x);
67542@@ -2170,7 +2172,7 @@ static int slub_min_objects;
67543 * Merge control. If this is set then no merging of slab caches will occur.
67544 * (Could be removed. This was introduced to pacify the merge skeptics.)
67545 */
67546-static int slub_nomerge;
67547+static int slub_nomerge = 1;
67548
67549 /*
67550 * Calculate the order of allocation given an slab object size.
67551@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
67552 * list to avoid pounding the page allocator excessively.
67553 */
67554 set_min_partial(s, ilog2(s->size));
67555- s->refcount = 1;
67556+ atomic_set(&s->refcount, 1);
67557 #ifdef CONFIG_NUMA
67558 s->remote_node_defrag_ratio = 1000;
67559 #endif
67560@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
67561 void kmem_cache_destroy(struct kmem_cache *s)
67562 {
67563 down_write(&slub_lock);
67564- s->refcount--;
67565- if (!s->refcount) {
67566+ if (atomic_dec_and_test(&s->refcount)) {
67567 list_del(&s->list);
67568 if (kmem_cache_close(s)) {
67569 printk(KERN_ERR "SLUB %s: %s called for cache that "
67570@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
67571 EXPORT_SYMBOL(__kmalloc_node);
67572 #endif
67573
67574+void check_object_size(const void *ptr, unsigned long n, bool to)
67575+{
67576+
67577+#ifdef CONFIG_PAX_USERCOPY
67578+ struct page *page;
67579+ struct kmem_cache *s = NULL;
67580+ unsigned long offset;
67581+
67582+ if (!n)
67583+ return;
67584+
67585+ if (ZERO_OR_NULL_PTR(ptr))
67586+ goto report;
67587+
67588+ if (!virt_addr_valid(ptr))
67589+ return;
67590+
67591+ page = virt_to_head_page(ptr);
67592+
67593+ if (!PageSlab(page)) {
67594+ if (object_is_on_stack(ptr, n) == -1)
67595+ goto report;
67596+ return;
67597+ }
67598+
67599+ s = page->slab;
67600+ if (!(s->flags & SLAB_USERCOPY))
67601+ goto report;
67602+
67603+ offset = (ptr - page_address(page)) % s->size;
67604+ if (offset <= s->objsize && n <= s->objsize - offset)
67605+ return;
67606+
67607+report:
67608+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67609+#endif
67610+
67611+}
67612+EXPORT_SYMBOL(check_object_size);
67613+
67614 size_t ksize(const void *object)
67615 {
67616 struct page *page;
67617@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
67618 int node;
67619
67620 list_add(&s->list, &slab_caches);
67621- s->refcount = -1;
67622+ atomic_set(&s->refcount, -1);
67623
67624 for_each_node_state(node, N_NORMAL_MEMORY) {
67625 struct kmem_cache_node *n = get_node(s, node);
67626@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
67627
67628 /* Caches that are not of the two-to-the-power-of size */
67629 if (KMALLOC_MIN_SIZE <= 32) {
67630- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
67631+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
67632 caches++;
67633 }
67634
67635 if (KMALLOC_MIN_SIZE <= 64) {
67636- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
67637+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
67638 caches++;
67639 }
67640
67641 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67642- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
67643+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
67644 caches++;
67645 }
67646
67647@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
67648 /*
67649 * We may have set a slab to be unmergeable during bootstrap.
67650 */
67651- if (s->refcount < 0)
67652+ if (atomic_read(&s->refcount) < 0)
67653 return 1;
67654
67655 return 0;
67656@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
67657 down_write(&slub_lock);
67658 s = find_mergeable(size, align, flags, name, ctor);
67659 if (s) {
67660- s->refcount++;
67661+ atomic_inc(&s->refcount);
67662 /*
67663 * Adjust the object sizes so that we clear
67664 * the complete object on kzalloc.
67665@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
67666 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
67667
67668 if (sysfs_slab_alias(s, name)) {
67669- s->refcount--;
67670+ atomic_dec(&s->refcount);
67671 goto err;
67672 }
67673 up_write(&slub_lock);
67674@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
67675
67676 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67677 {
67678- return sprintf(buf, "%d\n", s->refcount - 1);
67679+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67680 }
67681 SLAB_ATTR_RO(aliases);
67682
67683@@ -4894,7 +4935,13 @@ static const struct file_operations proc
67684
67685 static int __init slab_proc_init(void)
67686 {
67687- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67688+ mode_t gr_mode = S_IRUGO;
67689+
67690+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67691+ gr_mode = S_IRUSR;
67692+#endif
67693+
67694+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67695 return 0;
67696 }
67697 module_init(slab_proc_init);
67698diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
67699--- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
67700+++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
67701@@ -31,6 +31,7 @@
67702 #include <linux/backing-dev.h>
67703 #include <linux/memcontrol.h>
67704 #include <linux/gfp.h>
67705+#include <linux/hugetlb.h>
67706
67707 #include "internal.h"
67708
67709@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
67710
67711 __page_cache_release(page);
67712 dtor = get_compound_page_dtor(page);
67713+ if (!PageHuge(page))
67714+ BUG_ON(dtor != free_compound_page);
67715 (*dtor)(page);
67716 }
67717
67718diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
67719--- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
67720+++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
67721@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
67722
67723 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
67724 /* Activity counter to indicate that a swapon or swapoff has occurred */
67725-static atomic_t proc_poll_event = ATOMIC_INIT(0);
67726+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
67727
67728 static inline unsigned char swap_count(unsigned char ent)
67729 {
67730@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
67731 }
67732 filp_close(swap_file, NULL);
67733 err = 0;
67734- atomic_inc(&proc_poll_event);
67735+ atomic_inc_unchecked(&proc_poll_event);
67736 wake_up_interruptible(&proc_poll_wait);
67737
67738 out_dput:
67739@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
67740
67741 poll_wait(file, &proc_poll_wait, wait);
67742
67743- if (s->event != atomic_read(&proc_poll_event)) {
67744- s->event = atomic_read(&proc_poll_event);
67745+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
67746+ s->event = atomic_read_unchecked(&proc_poll_event);
67747 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
67748 }
67749
67750@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
67751 }
67752
67753 s->seq.private = s;
67754- s->event = atomic_read(&proc_poll_event);
67755+ s->event = atomic_read_unchecked(&proc_poll_event);
67756 return ret;
67757 }
67758
67759@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
67760 (p->flags & SWP_DISCARDABLE) ? "D" : "");
67761
67762 mutex_unlock(&swapon_mutex);
67763- atomic_inc(&proc_poll_event);
67764+ atomic_inc_unchecked(&proc_poll_event);
67765 wake_up_interruptible(&proc_poll_wait);
67766
67767 if (S_ISREG(inode->i_mode))
67768diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
67769--- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
67770+++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
67771@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
67772 * allocated buffer. Use this if you don't want to free the buffer immediately
67773 * like, for example, with RCU.
67774 */
67775+#undef __krealloc
67776 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
67777 {
67778 void *ret;
67779@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
67780 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
67781 * %NULL pointer, the object pointed to is freed.
67782 */
67783+#undef krealloc
67784 void *krealloc(const void *p, size_t new_size, gfp_t flags)
67785 {
67786 void *ret;
67787@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
67788 void arch_pick_mmap_layout(struct mm_struct *mm)
67789 {
67790 mm->mmap_base = TASK_UNMAPPED_BASE;
67791+
67792+#ifdef CONFIG_PAX_RANDMMAP
67793+ if (mm->pax_flags & MF_PAX_RANDMMAP)
67794+ mm->mmap_base += mm->delta_mmap;
67795+#endif
67796+
67797 mm->get_unmapped_area = arch_get_unmapped_area;
67798 mm->unmap_area = arch_unmap_area;
67799 }
67800diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
67801--- linux-3.0.4/mm/vmalloc.c 2011-09-02 18:11:21.000000000 -0400
67802+++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
67803@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67804
67805 pte = pte_offset_kernel(pmd, addr);
67806 do {
67807- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67808- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67809+
67810+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67811+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67812+ BUG_ON(!pte_exec(*pte));
67813+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67814+ continue;
67815+ }
67816+#endif
67817+
67818+ {
67819+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67820+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67821+ }
67822 } while (pte++, addr += PAGE_SIZE, addr != end);
67823 }
67824
67825@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67826 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67827 {
67828 pte_t *pte;
67829+ int ret = -ENOMEM;
67830
67831 /*
67832 * nr is a running index into the array which helps higher level
67833@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
67834 pte = pte_alloc_kernel(pmd, addr);
67835 if (!pte)
67836 return -ENOMEM;
67837+
67838+ pax_open_kernel();
67839 do {
67840 struct page *page = pages[*nr];
67841
67842- if (WARN_ON(!pte_none(*pte)))
67843- return -EBUSY;
67844- if (WARN_ON(!page))
67845- return -ENOMEM;
67846+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67847+ if (pgprot_val(prot) & _PAGE_NX)
67848+#endif
67849+
67850+ if (WARN_ON(!pte_none(*pte))) {
67851+ ret = -EBUSY;
67852+ goto out;
67853+ }
67854+ if (WARN_ON(!page)) {
67855+ ret = -ENOMEM;
67856+ goto out;
67857+ }
67858 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67859 (*nr)++;
67860 } while (pte++, addr += PAGE_SIZE, addr != end);
67861- return 0;
67862+ ret = 0;
67863+out:
67864+ pax_close_kernel();
67865+ return ret;
67866 }
67867
67868 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67869@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
67870 * and fall back on vmalloc() if that fails. Others
67871 * just put it in the vmalloc space.
67872 */
67873-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67874+#ifdef CONFIG_MODULES
67875+#ifdef MODULES_VADDR
67876 unsigned long addr = (unsigned long)x;
67877 if (addr >= MODULES_VADDR && addr < MODULES_END)
67878 return 1;
67879 #endif
67880+
67881+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67882+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67883+ return 1;
67884+#endif
67885+
67886+#endif
67887+
67888 return is_vmalloc_addr(x);
67889 }
67890
67891@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
67892
67893 if (!pgd_none(*pgd)) {
67894 pud_t *pud = pud_offset(pgd, addr);
67895+#ifdef CONFIG_X86
67896+ if (!pud_large(*pud))
67897+#endif
67898 if (!pud_none(*pud)) {
67899 pmd_t *pmd = pmd_offset(pud, addr);
67900+#ifdef CONFIG_X86
67901+ if (!pmd_large(*pmd))
67902+#endif
67903 if (!pmd_none(*pmd)) {
67904 pte_t *ptep, pte;
67905
67906@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
67907 struct vm_struct *area;
67908
67909 BUG_ON(in_interrupt());
67910+
67911+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67912+ if (flags & VM_KERNEXEC) {
67913+ if (start != VMALLOC_START || end != VMALLOC_END)
67914+ return NULL;
67915+ start = (unsigned long)MODULES_EXEC_VADDR;
67916+ end = (unsigned long)MODULES_EXEC_END;
67917+ }
67918+#endif
67919+
67920 if (flags & VM_IOREMAP) {
67921 int bit = fls(size);
67922
67923@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
67924 if (count > totalram_pages)
67925 return NULL;
67926
67927+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67928+ if (!(pgprot_val(prot) & _PAGE_NX))
67929+ flags |= VM_KERNEXEC;
67930+#endif
67931+
67932 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67933 __builtin_return_address(0));
67934 if (!area)
67935@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
67936 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67937 return NULL;
67938
67939+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67940+ if (!(pgprot_val(prot) & _PAGE_NX))
67941+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67942+ node, gfp_mask, caller);
67943+ else
67944+#endif
67945+
67946 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
67947 gfp_mask, caller);
67948
67949@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
67950 gfp_mask, prot, node, caller);
67951 }
67952
67953+#undef __vmalloc
67954 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67955 {
67956 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67957@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
67958 * For tight control over page level allocator and protection flags
67959 * use __vmalloc() instead.
67960 */
67961+#undef vmalloc
67962 void *vmalloc(unsigned long size)
67963 {
67964 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
67965@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
67966 * For tight control over page level allocator and protection flags
67967 * use __vmalloc() instead.
67968 */
67969+#undef vzalloc
67970 void *vzalloc(unsigned long size)
67971 {
67972 return __vmalloc_node_flags(size, -1,
67973@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
67974 * The resulting memory area is zeroed so it can be mapped to userspace
67975 * without leaking data.
67976 */
67977+#undef vmalloc_user
67978 void *vmalloc_user(unsigned long size)
67979 {
67980 struct vm_struct *area;
67981@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
67982 * For tight control over page level allocator and protection flags
67983 * use __vmalloc() instead.
67984 */
67985+#undef vmalloc_node
67986 void *vmalloc_node(unsigned long size, int node)
67987 {
67988 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67989@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
67990 * For tight control over page level allocator and protection flags
67991 * use __vmalloc_node() instead.
67992 */
67993+#undef vzalloc_node
67994 void *vzalloc_node(unsigned long size, int node)
67995 {
67996 return __vmalloc_node_flags(size, node,
67997@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
67998 * For tight control over page level allocator and protection flags
67999 * use __vmalloc() instead.
68000 */
68001-
68002+#undef vmalloc_exec
68003 void *vmalloc_exec(unsigned long size)
68004 {
68005- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
68006+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
68007 -1, __builtin_return_address(0));
68008 }
68009
68010@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
68011 * Allocate enough 32bit PA addressable pages to cover @size from the
68012 * page level allocator and map them into contiguous kernel virtual space.
68013 */
68014+#undef vmalloc_32
68015 void *vmalloc_32(unsigned long size)
68016 {
68017 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
68018@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
68019 * The resulting memory area is 32bit addressable and zeroed so it can be
68020 * mapped to userspace without leaking data.
68021 */
68022+#undef vmalloc_32_user
68023 void *vmalloc_32_user(unsigned long size)
68024 {
68025 struct vm_struct *area;
68026@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
68027 unsigned long uaddr = vma->vm_start;
68028 unsigned long usize = vma->vm_end - vma->vm_start;
68029
68030+ BUG_ON(vma->vm_mirror);
68031+
68032 if ((PAGE_SIZE-1) & (unsigned long)addr)
68033 return -EINVAL;
68034
68035diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
68036--- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
68037+++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
68038@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
68039 *
68040 * vm_stat contains the global counters
68041 */
68042-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68043+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68044 EXPORT_SYMBOL(vm_stat);
68045
68046 #ifdef CONFIG_SMP
68047@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
68048 v = p->vm_stat_diff[i];
68049 p->vm_stat_diff[i] = 0;
68050 local_irq_restore(flags);
68051- atomic_long_add(v, &zone->vm_stat[i]);
68052+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
68053 global_diff[i] += v;
68054 #ifdef CONFIG_NUMA
68055 /* 3 seconds idle till flush */
68056@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
68057
68058 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
68059 if (global_diff[i])
68060- atomic_long_add(global_diff[i], &vm_stat[i]);
68061+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
68062 }
68063
68064 #endif
68065@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
68066 start_cpu_timer(cpu);
68067 #endif
68068 #ifdef CONFIG_PROC_FS
68069- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
68070- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
68071- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
68072- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
68073+ {
68074+ mode_t gr_mode = S_IRUGO;
68075+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68076+ gr_mode = S_IRUSR;
68077+#endif
68078+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
68079+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
68080+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68081+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
68082+#else
68083+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
68084+#endif
68085+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
68086+ }
68087 #endif
68088 return 0;
68089 }
68090diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
68091--- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
68092+++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
68093@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
68094 err = -EPERM;
68095 if (!capable(CAP_NET_ADMIN))
68096 break;
68097- if ((args.u.name_type >= 0) &&
68098- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
68099+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
68100 struct vlan_net *vn;
68101
68102 vn = net_generic(net, vlan_net_id);
68103diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
68104--- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
68105+++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
68106@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
68107 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
68108 return 1;
68109 atm_return(vcc, truesize);
68110- atomic_inc(&vcc->stats->rx_drop);
68111+ atomic_inc_unchecked(&vcc->stats->rx_drop);
68112 return 0;
68113 }
68114 EXPORT_SYMBOL(atm_charge);
68115@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
68116 }
68117 }
68118 atm_return(vcc, guess);
68119- atomic_inc(&vcc->stats->rx_drop);
68120+ atomic_inc_unchecked(&vcc->stats->rx_drop);
68121 return NULL;
68122 }
68123 EXPORT_SYMBOL(atm_alloc_charge);
68124@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
68125
68126 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
68127 {
68128-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
68129+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
68130 __SONET_ITEMS
68131 #undef __HANDLE_ITEM
68132 }
68133@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
68134
68135 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
68136 {
68137-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
68138+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
68139 __SONET_ITEMS
68140 #undef __HANDLE_ITEM
68141 }
68142diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
68143--- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
68144+++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
68145@@ -48,7 +48,7 @@ struct lane2_ops {
68146 const u8 *tlvs, u32 sizeoftlvs);
68147 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
68148 const u8 *tlvs, u32 sizeoftlvs);
68149-};
68150+} __no_const;
68151
68152 /*
68153 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
68154diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
68155--- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
68156+++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
68157@@ -33,7 +33,7 @@ struct mpoa_client {
68158 struct mpc_parameters parameters; /* parameters for this client */
68159
68160 const struct net_device_ops *old_ops;
68161- struct net_device_ops new_ops;
68162+ net_device_ops_no_const new_ops;
68163 };
68164
68165
68166diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
68167--- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
68168+++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
68169@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
68170 struct timeval now;
68171 struct k_message msg;
68172
68173+ pax_track_stack();
68174+
68175 do_gettimeofday(&now);
68176
68177 read_lock_bh(&client->ingress_lock);
68178diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
68179--- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
68180+++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
68181@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
68182 const struct k_atm_aal_stats *stats)
68183 {
68184 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
68185- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
68186- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
68187- atomic_read(&stats->rx_drop));
68188+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
68189+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
68190+ atomic_read_unchecked(&stats->rx_drop));
68191 }
68192
68193 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
68194diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
68195--- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
68196+++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
68197@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
68198 static void copy_aal_stats(struct k_atm_aal_stats *from,
68199 struct atm_aal_stats *to)
68200 {
68201-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
68202+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
68203 __AAL_STAT_ITEMS
68204 #undef __HANDLE_ITEM
68205 }
68206@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
68207 static void subtract_aal_stats(struct k_atm_aal_stats *from,
68208 struct atm_aal_stats *to)
68209 {
68210-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
68211+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
68212 __AAL_STAT_ITEMS
68213 #undef __HANDLE_ITEM
68214 }
68215diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
68216--- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
68217+++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
68218@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
68219 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
68220 dev_add_pack(&hard_iface->batman_adv_ptype);
68221
68222- atomic_set(&hard_iface->seqno, 1);
68223- atomic_set(&hard_iface->frag_seqno, 1);
68224+ atomic_set_unchecked(&hard_iface->seqno, 1);
68225+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
68226 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
68227 hard_iface->net_dev->name);
68228
68229diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
68230--- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
68231+++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
68232@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
68233 return;
68234
68235 /* could be changed by schedule_own_packet() */
68236- if_incoming_seqno = atomic_read(&if_incoming->seqno);
68237+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
68238
68239 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
68240
68241diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
68242--- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
68243+++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
68244@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
68245
68246 /* change sequence number to network order */
68247 batman_packet->seqno =
68248- htonl((uint32_t)atomic_read(&hard_iface->seqno));
68249+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
68250
68251 if (vis_server == VIS_TYPE_SERVER_SYNC)
68252 batman_packet->flags |= VIS_SERVER;
68253@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
68254 else
68255 batman_packet->gw_flags = 0;
68256
68257- atomic_inc(&hard_iface->seqno);
68258+ atomic_inc_unchecked(&hard_iface->seqno);
68259
68260 slide_own_bcast_window(hard_iface);
68261 send_time = own_send_time(bat_priv);
68262diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
68263--- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
68264+++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
68265@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
68266
68267 /* set broadcast sequence number */
68268 bcast_packet->seqno =
68269- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
68270+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
68271
68272 add_bcast_packet_to_list(bat_priv, skb);
68273
68274@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
68275 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
68276
68277 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
68278- atomic_set(&bat_priv->bcast_seqno, 1);
68279+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
68280 atomic_set(&bat_priv->tt_local_changed, 0);
68281
68282 bat_priv->primary_if = NULL;
68283diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
68284--- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
68285+++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
68286@@ -38,8 +38,8 @@ struct hard_iface {
68287 int16_t if_num;
68288 char if_status;
68289 struct net_device *net_dev;
68290- atomic_t seqno;
68291- atomic_t frag_seqno;
68292+ atomic_unchecked_t seqno;
68293+ atomic_unchecked_t frag_seqno;
68294 unsigned char *packet_buff;
68295 int packet_len;
68296 struct kobject *hardif_obj;
68297@@ -142,7 +142,7 @@ struct bat_priv {
68298 atomic_t orig_interval; /* uint */
68299 atomic_t hop_penalty; /* uint */
68300 atomic_t log_level; /* uint */
68301- atomic_t bcast_seqno;
68302+ atomic_unchecked_t bcast_seqno;
68303 atomic_t bcast_queue_left;
68304 atomic_t batman_queue_left;
68305 char num_ifaces;
68306diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
68307--- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
68308+++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
68309@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
68310 frag1->flags = UNI_FRAG_HEAD | large_tail;
68311 frag2->flags = large_tail;
68312
68313- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
68314+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
68315 frag1->seqno = htons(seqno - 1);
68316 frag2->seqno = htons(seqno);
68317
68318diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
68319--- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
68320+++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
68321@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
68322 nexthdr = ip6h->nexthdr;
68323 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
68324
68325- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
68326+ if (nexthdr != IPPROTO_ICMPV6)
68327 return 0;
68328
68329 /* Okay, we found ICMPv6 header */
68330diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
68331--- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
68332+++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
68333@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
68334 tmp.valid_hooks = t->table->valid_hooks;
68335 }
68336 mutex_unlock(&ebt_mutex);
68337- if (copy_to_user(user, &tmp, *len) != 0){
68338+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
68339 BUGPRINT("c2u Didn't work\n");
68340 ret = -EFAULT;
68341 break;
68342@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
68343 int ret;
68344 void __user *pos;
68345
68346+ pax_track_stack();
68347+
68348 memset(&tinfo, 0, sizeof(tinfo));
68349
68350 if (cmd == EBT_SO_GET_ENTRIES) {
68351diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
68352--- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
68353+++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
68354@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
68355 #ifdef CONFIG_DEBUG_FS
68356 struct debug_fs_counter {
68357 atomic_t caif_nr_socks;
68358- atomic_t caif_sock_create;
68359- atomic_t num_connect_req;
68360- atomic_t num_connect_resp;
68361- atomic_t num_connect_fail_resp;
68362- atomic_t num_disconnect;
68363- atomic_t num_remote_shutdown_ind;
68364- atomic_t num_tx_flow_off_ind;
68365- atomic_t num_tx_flow_on_ind;
68366- atomic_t num_rx_flow_off;
68367- atomic_t num_rx_flow_on;
68368+ atomic_unchecked_t caif_sock_create;
68369+ atomic_unchecked_t num_connect_req;
68370+ atomic_unchecked_t num_connect_resp;
68371+ atomic_unchecked_t num_connect_fail_resp;
68372+ atomic_unchecked_t num_disconnect;
68373+ atomic_unchecked_t num_remote_shutdown_ind;
68374+ atomic_unchecked_t num_tx_flow_off_ind;
68375+ atomic_unchecked_t num_tx_flow_on_ind;
68376+ atomic_unchecked_t num_rx_flow_off;
68377+ atomic_unchecked_t num_rx_flow_on;
68378 };
68379 static struct debug_fs_counter cnt;
68380 #define dbfs_atomic_inc(v) atomic_inc_return(v)
68381+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
68382 #define dbfs_atomic_dec(v) atomic_dec_return(v)
68383 #else
68384 #define dbfs_atomic_inc(v) 0
68385@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
68386 atomic_read(&cf_sk->sk.sk_rmem_alloc),
68387 sk_rcvbuf_lowwater(cf_sk));
68388 set_rx_flow_off(cf_sk);
68389- dbfs_atomic_inc(&cnt.num_rx_flow_off);
68390+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
68391 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
68392 }
68393
68394@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
68395 set_rx_flow_off(cf_sk);
68396 if (net_ratelimit())
68397 pr_debug("sending flow OFF due to rmem_schedule\n");
68398- dbfs_atomic_inc(&cnt.num_rx_flow_off);
68399+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
68400 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
68401 }
68402 skb->dev = NULL;
68403@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
68404 switch (flow) {
68405 case CAIF_CTRLCMD_FLOW_ON_IND:
68406 /* OK from modem to start sending again */
68407- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
68408+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
68409 set_tx_flow_on(cf_sk);
68410 cf_sk->sk.sk_state_change(&cf_sk->sk);
68411 break;
68412
68413 case CAIF_CTRLCMD_FLOW_OFF_IND:
68414 /* Modem asks us to shut up */
68415- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
68416+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
68417 set_tx_flow_off(cf_sk);
68418 cf_sk->sk.sk_state_change(&cf_sk->sk);
68419 break;
68420@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
68421 /* We're now connected */
68422 caif_client_register_refcnt(&cf_sk->layer,
68423 cfsk_hold, cfsk_put);
68424- dbfs_atomic_inc(&cnt.num_connect_resp);
68425+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
68426 cf_sk->sk.sk_state = CAIF_CONNECTED;
68427 set_tx_flow_on(cf_sk);
68428 cf_sk->sk.sk_state_change(&cf_sk->sk);
68429@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
68430
68431 case CAIF_CTRLCMD_INIT_FAIL_RSP:
68432 /* Connect request failed */
68433- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
68434+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
68435 cf_sk->sk.sk_err = ECONNREFUSED;
68436 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
68437 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
68438@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
68439
68440 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
68441 /* Modem has closed this connection, or device is down. */
68442- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
68443+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
68444 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
68445 cf_sk->sk.sk_err = ECONNRESET;
68446 set_rx_flow_on(cf_sk);
68447@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
68448 return;
68449
68450 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
68451- dbfs_atomic_inc(&cnt.num_rx_flow_on);
68452+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
68453 set_rx_flow_on(cf_sk);
68454 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
68455 }
68456@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
68457 /*ifindex = id of the interface.*/
68458 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
68459
68460- dbfs_atomic_inc(&cnt.num_connect_req);
68461+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
68462 cf_sk->layer.receive = caif_sktrecv_cb;
68463
68464 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
68465@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
68466 spin_unlock_bh(&sk->sk_receive_queue.lock);
68467 sock->sk = NULL;
68468
68469- dbfs_atomic_inc(&cnt.num_disconnect);
68470+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
68471
68472 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
68473 if (cf_sk->debugfs_socket_dir != NULL)
68474@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
68475 cf_sk->conn_req.protocol = protocol;
68476 /* Increase the number of sockets created. */
68477 dbfs_atomic_inc(&cnt.caif_nr_socks);
68478- num = dbfs_atomic_inc(&cnt.caif_sock_create);
68479+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
68480 #ifdef CONFIG_DEBUG_FS
68481 if (!IS_ERR(debugfsdir)) {
68482
68483diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
68484--- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
68485+++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
68486@@ -9,6 +9,7 @@
68487 #include <linux/stddef.h>
68488 #include <linux/spinlock.h>
68489 #include <linux/slab.h>
68490+#include <linux/sched.h>
68491 #include <net/caif/caif_layer.h>
68492 #include <net/caif/cfpkt.h>
68493 #include <net/caif/cfctrl.h>
68494@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
68495 dev_info.id = 0xff;
68496 memset(this, 0, sizeof(*this));
68497 cfsrvl_init(&this->serv, 0, &dev_info, false);
68498- atomic_set(&this->req_seq_no, 1);
68499- atomic_set(&this->rsp_seq_no, 1);
68500+ atomic_set_unchecked(&this->req_seq_no, 1);
68501+ atomic_set_unchecked(&this->rsp_seq_no, 1);
68502 this->serv.layer.receive = cfctrl_recv;
68503 sprintf(this->serv.layer.name, "ctrl");
68504 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
68505@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
68506 struct cfctrl_request_info *req)
68507 {
68508 spin_lock_bh(&ctrl->info_list_lock);
68509- atomic_inc(&ctrl->req_seq_no);
68510- req->sequence_no = atomic_read(&ctrl->req_seq_no);
68511+ atomic_inc_unchecked(&ctrl->req_seq_no);
68512+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
68513 list_add_tail(&req->list, &ctrl->list);
68514 spin_unlock_bh(&ctrl->info_list_lock);
68515 }
68516@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
68517 if (p != first)
68518 pr_warn("Requests are not received in order\n");
68519
68520- atomic_set(&ctrl->rsp_seq_no,
68521+ atomic_set_unchecked(&ctrl->rsp_seq_no,
68522 p->sequence_no);
68523 list_del(&p->list);
68524 goto out;
68525@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
68526 struct cfctrl *cfctrl = container_obj(layer);
68527 struct cfctrl_request_info rsp, *req;
68528
68529+ pax_track_stack();
68530
68531 cfpkt_extr_head(pkt, &cmdrsp, 1);
68532 cmd = cmdrsp & CFCTRL_CMD_MASK;
68533diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
68534--- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
68535+++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
68536@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
68537 }
68538
68539 kfree_skb(skb);
68540- atomic_inc(&sk->sk_drops);
68541+ atomic_inc_unchecked(&sk->sk_drops);
68542 sk_mem_reclaim_partial(sk);
68543
68544 return err;
68545diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
68546--- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
68547+++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
68548@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
68549 if (no_module && capable(CAP_NET_ADMIN))
68550 no_module = request_module("netdev-%s", name);
68551 if (no_module && capable(CAP_SYS_MODULE)) {
68552+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68553+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
68554+#else
68555 if (!request_module("%s", name))
68556 pr_err("Loading kernel module for a network device "
68557 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
68558 "instead\n", name);
68559+#endif
68560 }
68561 }
68562 EXPORT_SYMBOL(dev_load);
68563@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
68564
68565 struct dev_gso_cb {
68566 void (*destructor)(struct sk_buff *skb);
68567-};
68568+} __no_const;
68569
68570 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
68571
68572@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
68573 }
68574 EXPORT_SYMBOL(netif_rx_ni);
68575
68576-static void net_tx_action(struct softirq_action *h)
68577+static void net_tx_action(void)
68578 {
68579 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68580
68581@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
68582 }
68583 EXPORT_SYMBOL(netif_napi_del);
68584
68585-static void net_rx_action(struct softirq_action *h)
68586+static void net_rx_action(void)
68587 {
68588 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68589 unsigned long time_limit = jiffies + 2;
68590diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
68591--- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
68592+++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
68593@@ -60,7 +60,7 @@ struct flow_cache {
68594 struct timer_list rnd_timer;
68595 };
68596
68597-atomic_t flow_cache_genid = ATOMIC_INIT(0);
68598+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
68599 EXPORT_SYMBOL(flow_cache_genid);
68600 static struct flow_cache flow_cache_global;
68601 static struct kmem_cache *flow_cachep __read_mostly;
68602@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
68603
68604 static int flow_entry_valid(struct flow_cache_entry *fle)
68605 {
68606- if (atomic_read(&flow_cache_genid) != fle->genid)
68607+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
68608 return 0;
68609 if (fle->object && !fle->object->ops->check(fle->object))
68610 return 0;
68611@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
68612 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
68613 fcp->hash_count++;
68614 }
68615- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
68616+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
68617 flo = fle->object;
68618 if (!flo)
68619 goto ret_object;
68620@@ -274,7 +274,7 @@ nocache:
68621 }
68622 flo = resolver(net, key, family, dir, flo, ctx);
68623 if (fle) {
68624- fle->genid = atomic_read(&flow_cache_genid);
68625+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
68626 if (!IS_ERR(flo))
68627 fle->object = flo;
68628 else
68629diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
68630--- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
68631+++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
68632@@ -56,7 +56,7 @@
68633 struct rtnl_link {
68634 rtnl_doit_func doit;
68635 rtnl_dumpit_func dumpit;
68636-};
68637+} __no_const;
68638
68639 static DEFINE_MUTEX(rtnl_mutex);
68640
68641diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
68642--- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
68643+++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
68644@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
68645 struct sock *sk = skb->sk;
68646 int ret = 0;
68647
68648+ pax_track_stack();
68649+
68650 if (splice_grow_spd(pipe, &spd))
68651 return -ENOMEM;
68652
68653diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
68654--- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
68655+++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
68656@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68657 */
68658 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
68659 (unsigned)sk->sk_rcvbuf) {
68660- atomic_inc(&sk->sk_drops);
68661+ atomic_inc_unchecked(&sk->sk_drops);
68662 return -ENOMEM;
68663 }
68664
68665@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68666 return err;
68667
68668 if (!sk_rmem_schedule(sk, skb->truesize)) {
68669- atomic_inc(&sk->sk_drops);
68670+ atomic_inc_unchecked(&sk->sk_drops);
68671 return -ENOBUFS;
68672 }
68673
68674@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68675 skb_dst_force(skb);
68676
68677 spin_lock_irqsave(&list->lock, flags);
68678- skb->dropcount = atomic_read(&sk->sk_drops);
68679+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
68680 __skb_queue_tail(list, skb);
68681 spin_unlock_irqrestore(&list->lock, flags);
68682
68683@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
68684 skb->dev = NULL;
68685
68686 if (sk_rcvqueues_full(sk, skb)) {
68687- atomic_inc(&sk->sk_drops);
68688+ atomic_inc_unchecked(&sk->sk_drops);
68689 goto discard_and_relse;
68690 }
68691 if (nested)
68692@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
68693 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
68694 } else if (sk_add_backlog(sk, skb)) {
68695 bh_unlock_sock(sk);
68696- atomic_inc(&sk->sk_drops);
68697+ atomic_inc_unchecked(&sk->sk_drops);
68698 goto discard_and_relse;
68699 }
68700
68701@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
68702 if (len > sizeof(peercred))
68703 len = sizeof(peercred);
68704 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
68705- if (copy_to_user(optval, &peercred, len))
68706+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
68707 return -EFAULT;
68708 goto lenout;
68709 }
68710@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
68711 return -ENOTCONN;
68712 if (lv < len)
68713 return -EINVAL;
68714- if (copy_to_user(optval, address, len))
68715+ if (len > sizeof(address) || copy_to_user(optval, address, len))
68716 return -EFAULT;
68717 goto lenout;
68718 }
68719@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
68720
68721 if (len > lv)
68722 len = lv;
68723- if (copy_to_user(optval, &v, len))
68724+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
68725 return -EFAULT;
68726 lenout:
68727 if (put_user(len, optlen))
68728@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
68729 */
68730 smp_wmb();
68731 atomic_set(&sk->sk_refcnt, 1);
68732- atomic_set(&sk->sk_drops, 0);
68733+ atomic_set_unchecked(&sk->sk_drops, 0);
68734 }
68735 EXPORT_SYMBOL(sock_init_data);
68736
68737diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
68738--- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
68739+++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
68740@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
68741
68742 if (len > *lenp) len = *lenp;
68743
68744- if (copy_to_user(buffer, addr, len))
68745+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
68746 return -EFAULT;
68747
68748 *lenp = len;
68749@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
68750
68751 if (len > *lenp) len = *lenp;
68752
68753- if (copy_to_user(buffer, devname, len))
68754+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
68755 return -EFAULT;
68756
68757 *lenp = len;
68758diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
68759--- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
68760+++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
68761@@ -4,7 +4,7 @@
68762
68763 config ECONET
68764 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68765- depends on EXPERIMENTAL && INET
68766+ depends on EXPERIMENTAL && INET && BROKEN
68767 ---help---
68768 Econet is a fairly old and slow networking protocol mainly used by
68769 Acorn computers to access file and print servers. It uses native
68770diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
68771--- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
68772+++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
68773@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
68774 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68775 fib_sync_up(dev);
68776 #endif
68777- atomic_inc(&net->ipv4.dev_addr_genid);
68778+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68779 rt_cache_flush(dev_net(dev), -1);
68780 break;
68781 case NETDEV_DOWN:
68782 fib_del_ifaddr(ifa, NULL);
68783- atomic_inc(&net->ipv4.dev_addr_genid);
68784+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68785 if (ifa->ifa_dev->ifa_list == NULL) {
68786 /* Last address was deleted from this interface.
68787 * Disable IP.
68788@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
68789 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68790 fib_sync_up(dev);
68791 #endif
68792- atomic_inc(&net->ipv4.dev_addr_genid);
68793+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68794 rt_cache_flush(dev_net(dev), -1);
68795 break;
68796 case NETDEV_DOWN:
68797diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
68798--- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
68799+++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
68800@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
68801 nh->nh_saddr = inet_select_addr(nh->nh_dev,
68802 nh->nh_gw,
68803 nh->nh_parent->fib_scope);
68804- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
68805+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
68806
68807 return nh->nh_saddr;
68808 }
68809diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
68810--- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
68811+++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
68812@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
68813 r->idiag_retrans = 0;
68814
68815 r->id.idiag_if = sk->sk_bound_dev_if;
68816+
68817+#ifdef CONFIG_GRKERNSEC_HIDESYM
68818+ r->id.idiag_cookie[0] = 0;
68819+ r->id.idiag_cookie[1] = 0;
68820+#else
68821 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68822 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68823+#endif
68824
68825 r->id.idiag_sport = inet->inet_sport;
68826 r->id.idiag_dport = inet->inet_dport;
68827@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
68828 r->idiag_family = tw->tw_family;
68829 r->idiag_retrans = 0;
68830 r->id.idiag_if = tw->tw_bound_dev_if;
68831+
68832+#ifdef CONFIG_GRKERNSEC_HIDESYM
68833+ r->id.idiag_cookie[0] = 0;
68834+ r->id.idiag_cookie[1] = 0;
68835+#else
68836 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68837 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68838+#endif
68839+
68840 r->id.idiag_sport = tw->tw_sport;
68841 r->id.idiag_dport = tw->tw_dport;
68842 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68843@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
68844 if (sk == NULL)
68845 goto unlock;
68846
68847+#ifndef CONFIG_GRKERNSEC_HIDESYM
68848 err = -ESTALE;
68849 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68850 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68851 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68852 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68853 goto out;
68854+#endif
68855
68856 err = -ENOMEM;
68857 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68858@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
68859 r->idiag_retrans = req->retrans;
68860
68861 r->id.idiag_if = sk->sk_bound_dev_if;
68862+
68863+#ifdef CONFIG_GRKERNSEC_HIDESYM
68864+ r->id.idiag_cookie[0] = 0;
68865+ r->id.idiag_cookie[1] = 0;
68866+#else
68867 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68868 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68869+#endif
68870
68871 tmo = req->expires - jiffies;
68872 if (tmo < 0)
68873diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
68874--- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
68875+++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
68876@@ -18,12 +18,15 @@
68877 #include <linux/sched.h>
68878 #include <linux/slab.h>
68879 #include <linux/wait.h>
68880+#include <linux/security.h>
68881
68882 #include <net/inet_connection_sock.h>
68883 #include <net/inet_hashtables.h>
68884 #include <net/secure_seq.h>
68885 #include <net/ip.h>
68886
68887+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68888+
68889 /*
68890 * Allocate and initialize a new local port bind bucket.
68891 * The bindhash mutex for snum's hash chain must be held here.
68892@@ -530,6 +533,8 @@ ok:
68893 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
68894 spin_unlock(&head->lock);
68895
68896+ gr_update_task_in_ip_table(current, inet_sk(sk));
68897+
68898 if (tw) {
68899 inet_twsk_deschedule(tw, death_row);
68900 while (twrefcnt) {
68901diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
68902--- linux-3.0.4/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
68903+++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
68904@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
68905 unsigned int sequence;
68906 int invalidated, newrefcnt = 0;
68907
68908+ pax_track_stack();
68909+
68910 /* Look up for the address quickly, lockless.
68911 * Because of a concurrent writer, we might not find an existing entry.
68912 */
68913@@ -517,8 +519,8 @@ found: /* The existing node has been fo
68914 if (p) {
68915 p->daddr = *daddr;
68916 atomic_set(&p->refcnt, 1);
68917- atomic_set(&p->rid, 0);
68918- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68919+ atomic_set_unchecked(&p->rid, 0);
68920+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68921 p->tcp_ts_stamp = 0;
68922 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
68923 p->rate_tokens = 0;
68924diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
68925--- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
68926+++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
68927@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
68928 return 0;
68929
68930 start = qp->rid;
68931- end = atomic_inc_return(&peer->rid);
68932+ end = atomic_inc_return_unchecked(&peer->rid);
68933 qp->rid = end;
68934
68935 rc = qp->q.fragments && (end - start) > max;
68936diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
68937--- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68938+++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68939@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
68940 int val;
68941 int len;
68942
68943+ pax_track_stack();
68944+
68945 if (level != SOL_IP)
68946 return -EOPNOTSUPP;
68947
68948@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
68949 len = min_t(unsigned int, len, opt->optlen);
68950 if (put_user(len, optlen))
68951 return -EFAULT;
68952- if (copy_to_user(optval, opt->__data, len))
68953+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
68954+ copy_to_user(optval, opt->__data, len))
68955 return -EFAULT;
68956 return 0;
68957 }
68958diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
68959--- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
68960+++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
68961@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
68962
68963 *len = 0;
68964
68965- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68966+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68967 if (*octets == NULL) {
68968 if (net_ratelimit())
68969 pr_notice("OOM in bsalg (%d)\n", __LINE__);
68970diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
68971--- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
68972+++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
68973@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
68974 sk_rmem_alloc_get(sp),
68975 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68976 atomic_read(&sp->sk_refcnt), sp,
68977- atomic_read(&sp->sk_drops), len);
68978+ atomic_read_unchecked(&sp->sk_drops), len);
68979 }
68980
68981 static int ping_seq_show(struct seq_file *seq, void *v)
68982diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
68983--- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
68984+++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
68985@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
68986 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68987 {
68988 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68989- atomic_inc(&sk->sk_drops);
68990+ atomic_inc_unchecked(&sk->sk_drops);
68991 kfree_skb(skb);
68992 return NET_RX_DROP;
68993 }
68994@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
68995
68996 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68997 {
68998+ struct icmp_filter filter;
68999+
69000 if (optlen > sizeof(struct icmp_filter))
69001 optlen = sizeof(struct icmp_filter);
69002- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
69003+ if (copy_from_user(&filter, optval, optlen))
69004 return -EFAULT;
69005+ raw_sk(sk)->filter = filter;
69006 return 0;
69007 }
69008
69009 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
69010 {
69011 int len, ret = -EFAULT;
69012+ struct icmp_filter filter;
69013
69014 if (get_user(len, optlen))
69015 goto out;
69016@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
69017 if (len > sizeof(struct icmp_filter))
69018 len = sizeof(struct icmp_filter);
69019 ret = -EFAULT;
69020- if (put_user(len, optlen) ||
69021- copy_to_user(optval, &raw_sk(sk)->filter, len))
69022+ filter = raw_sk(sk)->filter;
69023+ if (put_user(len, optlen) || len > sizeof filter ||
69024+ copy_to_user(optval, &filter, len))
69025 goto out;
69026 ret = 0;
69027 out: return ret;
69028@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
69029 sk_wmem_alloc_get(sp),
69030 sk_rmem_alloc_get(sp),
69031 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
69032- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
69033+ atomic_read(&sp->sk_refcnt),
69034+#ifdef CONFIG_GRKERNSEC_HIDESYM
69035+ NULL,
69036+#else
69037+ sp,
69038+#endif
69039+ atomic_read_unchecked(&sp->sk_drops));
69040 }
69041
69042 static int raw_seq_show(struct seq_file *seq, void *v)
69043diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
69044--- linux-3.0.4/net/ipv4/route.c 2011-09-02 18:11:21.000000000 -0400
69045+++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
69046@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
69047
69048 static inline int rt_genid(struct net *net)
69049 {
69050- return atomic_read(&net->ipv4.rt_genid);
69051+ return atomic_read_unchecked(&net->ipv4.rt_genid);
69052 }
69053
69054 #ifdef CONFIG_PROC_FS
69055@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
69056 unsigned char shuffle;
69057
69058 get_random_bytes(&shuffle, sizeof(shuffle));
69059- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
69060+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
69061 }
69062
69063 /*
69064@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
69065 error = rt->dst.error;
69066 if (peer) {
69067 inet_peer_refcheck(rt->peer);
69068- id = atomic_read(&peer->ip_id_count) & 0xffff;
69069+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
69070 if (peer->tcp_ts_stamp) {
69071 ts = peer->tcp_ts;
69072 tsage = get_seconds() - peer->tcp_ts_stamp;
69073diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
69074--- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
69075+++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
69076@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
69077 int val;
69078 int err = 0;
69079
69080+ pax_track_stack();
69081+
69082 /* These are data/string values, all the others are ints */
69083 switch (optname) {
69084 case TCP_CONGESTION: {
69085@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
69086 struct tcp_sock *tp = tcp_sk(sk);
69087 int val, len;
69088
69089+ pax_track_stack();
69090+
69091 if (get_user(len, optlen))
69092 return -EFAULT;
69093
69094diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
69095--- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
69096+++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
69097@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
69098 int sysctl_tcp_low_latency __read_mostly;
69099 EXPORT_SYMBOL(sysctl_tcp_low_latency);
69100
69101+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69102+extern int grsec_enable_blackhole;
69103+#endif
69104
69105 #ifdef CONFIG_TCP_MD5SIG
69106 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
69107@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
69108 return 0;
69109
69110 reset:
69111+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69112+ if (!grsec_enable_blackhole)
69113+#endif
69114 tcp_v4_send_reset(rsk, skb);
69115 discard:
69116 kfree_skb(skb);
69117@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
69118 TCP_SKB_CB(skb)->sacked = 0;
69119
69120 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69121- if (!sk)
69122+ if (!sk) {
69123+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69124+ ret = 1;
69125+#endif
69126 goto no_tcp_socket;
69127-
69128+ }
69129 process:
69130- if (sk->sk_state == TCP_TIME_WAIT)
69131+ if (sk->sk_state == TCP_TIME_WAIT) {
69132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69133+ ret = 2;
69134+#endif
69135 goto do_time_wait;
69136+ }
69137
69138 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
69139 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
69140@@ -1724,6 +1737,10 @@ no_tcp_socket:
69141 bad_packet:
69142 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69143 } else {
69144+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69145+ if (!grsec_enable_blackhole || (ret == 1 &&
69146+ (skb->dev->flags & IFF_LOOPBACK)))
69147+#endif
69148 tcp_v4_send_reset(NULL, skb);
69149 }
69150
69151@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
69152 0, /* non standard timer */
69153 0, /* open_requests have no inode */
69154 atomic_read(&sk->sk_refcnt),
69155+#ifdef CONFIG_GRKERNSEC_HIDESYM
69156+ NULL,
69157+#else
69158 req,
69159+#endif
69160 len);
69161 }
69162
69163@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
69164 sock_i_uid(sk),
69165 icsk->icsk_probes_out,
69166 sock_i_ino(sk),
69167- atomic_read(&sk->sk_refcnt), sk,
69168+ atomic_read(&sk->sk_refcnt),
69169+#ifdef CONFIG_GRKERNSEC_HIDESYM
69170+ NULL,
69171+#else
69172+ sk,
69173+#endif
69174 jiffies_to_clock_t(icsk->icsk_rto),
69175 jiffies_to_clock_t(icsk->icsk_ack.ato),
69176 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
69177@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
69178 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
69179 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
69180 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69181- atomic_read(&tw->tw_refcnt), tw, len);
69182+ atomic_read(&tw->tw_refcnt),
69183+#ifdef CONFIG_GRKERNSEC_HIDESYM
69184+ NULL,
69185+#else
69186+ tw,
69187+#endif
69188+ len);
69189 }
69190
69191 #define TMPSZ 150
69192diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
69193--- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
69194+++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
69195@@ -27,6 +27,10 @@
69196 #include <net/inet_common.h>
69197 #include <net/xfrm.h>
69198
69199+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69200+extern int grsec_enable_blackhole;
69201+#endif
69202+
69203 int sysctl_tcp_syncookies __read_mostly = 1;
69204 EXPORT_SYMBOL(sysctl_tcp_syncookies);
69205
69206@@ -745,6 +749,10 @@ listen_overflow:
69207
69208 embryonic_reset:
69209 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
69210+
69211+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69212+ if (!grsec_enable_blackhole)
69213+#endif
69214 if (!(flg & TCP_FLAG_RST))
69215 req->rsk_ops->send_reset(sk, skb);
69216
69217diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
69218--- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
69219+++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
69220@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
69221 int mss;
69222 int s_data_desired = 0;
69223
69224+ pax_track_stack();
69225+
69226 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
69227 s_data_desired = cvp->s_data_desired;
69228 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
69229diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
69230--- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
69231+++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
69232@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
69233 if (cnt + width >= len)
69234 break;
69235
69236- if (copy_to_user(buf + cnt, tbuf, width))
69237+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
69238 return -EFAULT;
69239 cnt += width;
69240 }
69241diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
69242--- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
69243+++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
69244@@ -22,6 +22,10 @@
69245 #include <linux/gfp.h>
69246 #include <net/tcp.h>
69247
69248+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69249+extern int grsec_lastack_retries;
69250+#endif
69251+
69252 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
69253 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
69254 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
69255@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
69256 }
69257 }
69258
69259+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69260+ if ((sk->sk_state == TCP_LAST_ACK) &&
69261+ (grsec_lastack_retries > 0) &&
69262+ (grsec_lastack_retries < retry_until))
69263+ retry_until = grsec_lastack_retries;
69264+#endif
69265+
69266 if (retransmits_timed_out(sk, retry_until,
69267 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
69268 /* Has it gone just too far? */
69269diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
69270--- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
69271+++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
69272@@ -86,6 +86,7 @@
69273 #include <linux/types.h>
69274 #include <linux/fcntl.h>
69275 #include <linux/module.h>
69276+#include <linux/security.h>
69277 #include <linux/socket.h>
69278 #include <linux/sockios.h>
69279 #include <linux/igmp.h>
69280@@ -107,6 +108,10 @@
69281 #include <net/xfrm.h>
69282 #include "udp_impl.h"
69283
69284+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69285+extern int grsec_enable_blackhole;
69286+#endif
69287+
69288 struct udp_table udp_table __read_mostly;
69289 EXPORT_SYMBOL(udp_table);
69290
69291@@ -564,6 +569,9 @@ found:
69292 return s;
69293 }
69294
69295+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
69296+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
69297+
69298 /*
69299 * This routine is called by the ICMP module when it gets some
69300 * sort of error condition. If err < 0 then the socket should
69301@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
69302 dport = usin->sin_port;
69303 if (dport == 0)
69304 return -EINVAL;
69305+
69306+ err = gr_search_udp_sendmsg(sk, usin);
69307+ if (err)
69308+ return err;
69309 } else {
69310 if (sk->sk_state != TCP_ESTABLISHED)
69311 return -EDESTADDRREQ;
69312+
69313+ err = gr_search_udp_sendmsg(sk, NULL);
69314+ if (err)
69315+ return err;
69316+
69317 daddr = inet->inet_daddr;
69318 dport = inet->inet_dport;
69319 /* Open fast path for connected socket.
69320@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
69321 udp_lib_checksum_complete(skb)) {
69322 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
69323 IS_UDPLITE(sk));
69324- atomic_inc(&sk->sk_drops);
69325+ atomic_inc_unchecked(&sk->sk_drops);
69326 __skb_unlink(skb, rcvq);
69327 __skb_queue_tail(&list_kill, skb);
69328 }
69329@@ -1184,6 +1201,10 @@ try_again:
69330 if (!skb)
69331 goto out;
69332
69333+ err = gr_search_udp_recvmsg(sk, skb);
69334+ if (err)
69335+ goto out_free;
69336+
69337 ulen = skb->len - sizeof(struct udphdr);
69338 if (len > ulen)
69339 len = ulen;
69340@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
69341
69342 drop:
69343 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
69344- atomic_inc(&sk->sk_drops);
69345+ atomic_inc_unchecked(&sk->sk_drops);
69346 kfree_skb(skb);
69347 return -1;
69348 }
69349@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
69350 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
69351
69352 if (!skb1) {
69353- atomic_inc(&sk->sk_drops);
69354+ atomic_inc_unchecked(&sk->sk_drops);
69355 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
69356 IS_UDPLITE(sk));
69357 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
69358@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
69359 goto csum_error;
69360
69361 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
69362+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69363+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69364+#endif
69365 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
69366
69367 /*
69368@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
69369 sk_wmem_alloc_get(sp),
69370 sk_rmem_alloc_get(sp),
69371 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
69372- atomic_read(&sp->sk_refcnt), sp,
69373- atomic_read(&sp->sk_drops), len);
69374+ atomic_read(&sp->sk_refcnt),
69375+#ifdef CONFIG_GRKERNSEC_HIDESYM
69376+ NULL,
69377+#else
69378+ sp,
69379+#endif
69380+ atomic_read_unchecked(&sp->sk_drops), len);
69381 }
69382
69383 int udp4_seq_show(struct seq_file *seq, void *v)
69384diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
69385--- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
69386+++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
69387@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
69388 #ifdef CONFIG_XFRM
69389 {
69390 struct rt6_info *rt = (struct rt6_info *)dst;
69391- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
69392+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
69393 }
69394 #endif
69395 }
69396@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
69397 #ifdef CONFIG_XFRM
69398 if (dst) {
69399 struct rt6_info *rt = (struct rt6_info *)dst;
69400- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
69401+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
69402 __sk_dst_reset(sk);
69403 dst = NULL;
69404 }
69405diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
69406--- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
69407+++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
69408@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
69409 int val, valbool;
69410 int retv = -ENOPROTOOPT;
69411
69412+ pax_track_stack();
69413+
69414 if (optval == NULL)
69415 val=0;
69416 else {
69417@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
69418 int len;
69419 int val;
69420
69421+ pax_track_stack();
69422+
69423 if (ip6_mroute_opt(optname))
69424 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
69425
69426diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
69427--- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
69428+++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
69429@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
69430 {
69431 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
69432 skb_checksum_complete(skb)) {
69433- atomic_inc(&sk->sk_drops);
69434+ atomic_inc_unchecked(&sk->sk_drops);
69435 kfree_skb(skb);
69436 return NET_RX_DROP;
69437 }
69438@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
69439 struct raw6_sock *rp = raw6_sk(sk);
69440
69441 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
69442- atomic_inc(&sk->sk_drops);
69443+ atomic_inc_unchecked(&sk->sk_drops);
69444 kfree_skb(skb);
69445 return NET_RX_DROP;
69446 }
69447@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
69448
69449 if (inet->hdrincl) {
69450 if (skb_checksum_complete(skb)) {
69451- atomic_inc(&sk->sk_drops);
69452+ atomic_inc_unchecked(&sk->sk_drops);
69453 kfree_skb(skb);
69454 return NET_RX_DROP;
69455 }
69456@@ -601,7 +601,7 @@ out:
69457 return err;
69458 }
69459
69460-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
69461+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
69462 struct flowi6 *fl6, struct dst_entry **dstp,
69463 unsigned int flags)
69464 {
69465@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
69466 u16 proto;
69467 int err;
69468
69469+ pax_track_stack();
69470+
69471 /* Rough check on arithmetic overflow,
69472 better check is made in ip6_append_data().
69473 */
69474@@ -909,12 +911,15 @@ do_confirm:
69475 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
69476 char __user *optval, int optlen)
69477 {
69478+ struct icmp6_filter filter;
69479+
69480 switch (optname) {
69481 case ICMPV6_FILTER:
69482 if (optlen > sizeof(struct icmp6_filter))
69483 optlen = sizeof(struct icmp6_filter);
69484- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
69485+ if (copy_from_user(&filter, optval, optlen))
69486 return -EFAULT;
69487+ raw6_sk(sk)->filter = filter;
69488 return 0;
69489 default:
69490 return -ENOPROTOOPT;
69491@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
69492 char __user *optval, int __user *optlen)
69493 {
69494 int len;
69495+ struct icmp6_filter filter;
69496
69497 switch (optname) {
69498 case ICMPV6_FILTER:
69499@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
69500 len = sizeof(struct icmp6_filter);
69501 if (put_user(len, optlen))
69502 return -EFAULT;
69503- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
69504+ filter = raw6_sk(sk)->filter;
69505+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
69506 return -EFAULT;
69507 return 0;
69508 default:
69509@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
69510 0, 0L, 0,
69511 sock_i_uid(sp), 0,
69512 sock_i_ino(sp),
69513- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
69514+ atomic_read(&sp->sk_refcnt),
69515+#ifdef CONFIG_GRKERNSEC_HIDESYM
69516+ NULL,
69517+#else
69518+ sp,
69519+#endif
69520+ atomic_read_unchecked(&sp->sk_drops));
69521 }
69522
69523 static int raw6_seq_show(struct seq_file *seq, void *v)
69524diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
69525--- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
69526+++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
69527@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
69528 }
69529 #endif
69530
69531+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69532+extern int grsec_enable_blackhole;
69533+#endif
69534+
69535 static void tcp_v6_hash(struct sock *sk)
69536 {
69537 if (sk->sk_state != TCP_CLOSE) {
69538@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
69539 return 0;
69540
69541 reset:
69542+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69543+ if (!grsec_enable_blackhole)
69544+#endif
69545 tcp_v6_send_reset(sk, skb);
69546 discard:
69547 if (opt_skb)
69548@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
69549 TCP_SKB_CB(skb)->sacked = 0;
69550
69551 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69552- if (!sk)
69553+ if (!sk) {
69554+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69555+ ret = 1;
69556+#endif
69557 goto no_tcp_socket;
69558+ }
69559
69560 process:
69561- if (sk->sk_state == TCP_TIME_WAIT)
69562+ if (sk->sk_state == TCP_TIME_WAIT) {
69563+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69564+ ret = 2;
69565+#endif
69566 goto do_time_wait;
69567+ }
69568
69569 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
69570 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
69571@@ -1794,6 +1809,10 @@ no_tcp_socket:
69572 bad_packet:
69573 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69574 } else {
69575+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69576+ if (!grsec_enable_blackhole || (ret == 1 &&
69577+ (skb->dev->flags & IFF_LOOPBACK)))
69578+#endif
69579 tcp_v6_send_reset(NULL, skb);
69580 }
69581
69582@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
69583 uid,
69584 0, /* non standard timer */
69585 0, /* open_requests have no inode */
69586- 0, req);
69587+ 0,
69588+#ifdef CONFIG_GRKERNSEC_HIDESYM
69589+ NULL
69590+#else
69591+ req
69592+#endif
69593+ );
69594 }
69595
69596 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
69597@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
69598 sock_i_uid(sp),
69599 icsk->icsk_probes_out,
69600 sock_i_ino(sp),
69601- atomic_read(&sp->sk_refcnt), sp,
69602+ atomic_read(&sp->sk_refcnt),
69603+#ifdef CONFIG_GRKERNSEC_HIDESYM
69604+ NULL,
69605+#else
69606+ sp,
69607+#endif
69608 jiffies_to_clock_t(icsk->icsk_rto),
69609 jiffies_to_clock_t(icsk->icsk_ack.ato),
69610 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
69611@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
69612 dest->s6_addr32[2], dest->s6_addr32[3], destp,
69613 tw->tw_substate, 0, 0,
69614 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69615- atomic_read(&tw->tw_refcnt), tw);
69616+ atomic_read(&tw->tw_refcnt),
69617+#ifdef CONFIG_GRKERNSEC_HIDESYM
69618+ NULL
69619+#else
69620+ tw
69621+#endif
69622+ );
69623 }
69624
69625 static int tcp6_seq_show(struct seq_file *seq, void *v)
69626diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
69627--- linux-3.0.4/net/ipv6/udp.c 2011-09-02 18:11:21.000000000 -0400
69628+++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
69629@@ -50,6 +50,10 @@
69630 #include <linux/seq_file.h>
69631 #include "udp_impl.h"
69632
69633+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69634+extern int grsec_enable_blackhole;
69635+#endif
69636+
69637 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
69638 {
69639 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
69640@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
69641
69642 return 0;
69643 drop:
69644- atomic_inc(&sk->sk_drops);
69645+ atomic_inc_unchecked(&sk->sk_drops);
69646 drop_no_sk_drops_inc:
69647 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
69648 kfree_skb(skb);
69649@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
69650 continue;
69651 }
69652 drop:
69653- atomic_inc(&sk->sk_drops);
69654+ atomic_inc_unchecked(&sk->sk_drops);
69655 UDP6_INC_STATS_BH(sock_net(sk),
69656 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
69657 UDP6_INC_STATS_BH(sock_net(sk),
69658@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69659 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
69660 proto == IPPROTO_UDPLITE);
69661
69662+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69663+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69664+#endif
69665 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
69666
69667 kfree_skb(skb);
69668@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69669 if (!sock_owned_by_user(sk))
69670 udpv6_queue_rcv_skb(sk, skb);
69671 else if (sk_add_backlog(sk, skb)) {
69672- atomic_inc(&sk->sk_drops);
69673+ atomic_inc_unchecked(&sk->sk_drops);
69674 bh_unlock_sock(sk);
69675 sock_put(sk);
69676 goto discard;
69677@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
69678 0, 0L, 0,
69679 sock_i_uid(sp), 0,
69680 sock_i_ino(sp),
69681- atomic_read(&sp->sk_refcnt), sp,
69682- atomic_read(&sp->sk_drops));
69683+ atomic_read(&sp->sk_refcnt),
69684+#ifdef CONFIG_GRKERNSEC_HIDESYM
69685+ NULL,
69686+#else
69687+ sp,
69688+#endif
69689+ atomic_read_unchecked(&sp->sk_drops));
69690 }
69691
69692 int udp6_seq_show(struct seq_file *seq, void *v)
69693diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
69694--- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
69695+++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
69696@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
69697 add_wait_queue(&self->open_wait, &wait);
69698
69699 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
69700- __FILE__,__LINE__, tty->driver->name, self->open_count );
69701+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69702
69703 /* As far as I can see, we protect open_count - Jean II */
69704 spin_lock_irqsave(&self->spinlock, flags);
69705 if (!tty_hung_up_p(filp)) {
69706 extra_count = 1;
69707- self->open_count--;
69708+ local_dec(&self->open_count);
69709 }
69710 spin_unlock_irqrestore(&self->spinlock, flags);
69711- self->blocked_open++;
69712+ local_inc(&self->blocked_open);
69713
69714 while (1) {
69715 if (tty->termios->c_cflag & CBAUD) {
69716@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
69717 }
69718
69719 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69720- __FILE__,__LINE__, tty->driver->name, self->open_count );
69721+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69722
69723 schedule();
69724 }
69725@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
69726 if (extra_count) {
69727 /* ++ is not atomic, so this should be protected - Jean II */
69728 spin_lock_irqsave(&self->spinlock, flags);
69729- self->open_count++;
69730+ local_inc(&self->open_count);
69731 spin_unlock_irqrestore(&self->spinlock, flags);
69732 }
69733- self->blocked_open--;
69734+ local_dec(&self->blocked_open);
69735
69736 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69737- __FILE__,__LINE__, tty->driver->name, self->open_count);
69738+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69739
69740 if (!retval)
69741 self->flags |= ASYNC_NORMAL_ACTIVE;
69742@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
69743 }
69744 /* ++ is not atomic, so this should be protected - Jean II */
69745 spin_lock_irqsave(&self->spinlock, flags);
69746- self->open_count++;
69747+ local_inc(&self->open_count);
69748
69749 tty->driver_data = self;
69750 self->tty = tty;
69751 spin_unlock_irqrestore(&self->spinlock, flags);
69752
69753 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69754- self->line, self->open_count);
69755+ self->line, local_read(&self->open_count));
69756
69757 /* Not really used by us, but lets do it anyway */
69758 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69759@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
69760 return;
69761 }
69762
69763- if ((tty->count == 1) && (self->open_count != 1)) {
69764+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69765 /*
69766 * Uh, oh. tty->count is 1, which means that the tty
69767 * structure will be freed. state->count should always
69768@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
69769 */
69770 IRDA_DEBUG(0, "%s(), bad serial port count; "
69771 "tty->count is 1, state->count is %d\n", __func__ ,
69772- self->open_count);
69773- self->open_count = 1;
69774+ local_read(&self->open_count));
69775+ local_set(&self->open_count, 1);
69776 }
69777
69778- if (--self->open_count < 0) {
69779+ if (local_dec_return(&self->open_count) < 0) {
69780 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69781- __func__, self->line, self->open_count);
69782- self->open_count = 0;
69783+ __func__, self->line, local_read(&self->open_count));
69784+ local_set(&self->open_count, 0);
69785 }
69786- if (self->open_count) {
69787+ if (local_read(&self->open_count)) {
69788 spin_unlock_irqrestore(&self->spinlock, flags);
69789
69790 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69791@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
69792 tty->closing = 0;
69793 self->tty = NULL;
69794
69795- if (self->blocked_open) {
69796+ if (local_read(&self->blocked_open)) {
69797 if (self->close_delay)
69798 schedule_timeout_interruptible(self->close_delay);
69799 wake_up_interruptible(&self->open_wait);
69800@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
69801 spin_lock_irqsave(&self->spinlock, flags);
69802 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69803 self->tty = NULL;
69804- self->open_count = 0;
69805+ local_set(&self->open_count, 0);
69806 spin_unlock_irqrestore(&self->spinlock, flags);
69807
69808 wake_up_interruptible(&self->open_wait);
69809@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
69810 seq_putc(m, '\n');
69811
69812 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69813- seq_printf(m, "Open count: %d\n", self->open_count);
69814+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69815 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69816 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69817
69818diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
69819--- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
69820+++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
69821@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
69822
69823 write_lock_bh(&iucv_sk_list.lock);
69824
69825- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69826+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69827 while (__iucv_get_sock_by_name(name)) {
69828 sprintf(name, "%08x",
69829- atomic_inc_return(&iucv_sk_list.autobind_name));
69830+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69831 }
69832
69833 write_unlock_bh(&iucv_sk_list.lock);
69834diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
69835--- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
69836+++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
69837@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
69838 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69839 struct xfrm_kmaddress k;
69840
69841+ pax_track_stack();
69842+
69843 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69844 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69845 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69846@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
69847 static u32 get_acqseq(void)
69848 {
69849 u32 res;
69850- static atomic_t acqseq;
69851+ static atomic_unchecked_t acqseq;
69852
69853 do {
69854- res = atomic_inc_return(&acqseq);
69855+ res = atomic_inc_return_unchecked(&acqseq);
69856 } while (!res);
69857 return res;
69858 }
69859diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
69860--- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
69861+++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
69862@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
69863 goto out;
69864
69865 lapb->dev = dev;
69866- lapb->callbacks = *callbacks;
69867+ lapb->callbacks = callbacks;
69868
69869 __lapb_insert_cb(lapb);
69870
69871@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
69872
69873 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
69874 {
69875- if (lapb->callbacks.connect_confirmation)
69876- lapb->callbacks.connect_confirmation(lapb->dev, reason);
69877+ if (lapb->callbacks->connect_confirmation)
69878+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
69879 }
69880
69881 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
69882 {
69883- if (lapb->callbacks.connect_indication)
69884- lapb->callbacks.connect_indication(lapb->dev, reason);
69885+ if (lapb->callbacks->connect_indication)
69886+ lapb->callbacks->connect_indication(lapb->dev, reason);
69887 }
69888
69889 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
69890 {
69891- if (lapb->callbacks.disconnect_confirmation)
69892- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
69893+ if (lapb->callbacks->disconnect_confirmation)
69894+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
69895 }
69896
69897 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
69898 {
69899- if (lapb->callbacks.disconnect_indication)
69900- lapb->callbacks.disconnect_indication(lapb->dev, reason);
69901+ if (lapb->callbacks->disconnect_indication)
69902+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
69903 }
69904
69905 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
69906 {
69907- if (lapb->callbacks.data_indication)
69908- return lapb->callbacks.data_indication(lapb->dev, skb);
69909+ if (lapb->callbacks->data_indication)
69910+ return lapb->callbacks->data_indication(lapb->dev, skb);
69911
69912 kfree_skb(skb);
69913 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
69914@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
69915 {
69916 int used = 0;
69917
69918- if (lapb->callbacks.data_transmit) {
69919- lapb->callbacks.data_transmit(lapb->dev, skb);
69920+ if (lapb->callbacks->data_transmit) {
69921+ lapb->callbacks->data_transmit(lapb->dev, skb);
69922 used = 1;
69923 }
69924
69925diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
69926--- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
69927+++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
69928@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
69929 struct tid_ampdu_rx *tid_rx;
69930 struct tid_ampdu_tx *tid_tx;
69931
69932+ pax_track_stack();
69933+
69934 rcu_read_lock();
69935
69936 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
69937@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
69938 struct sta_info *sta = file->private_data;
69939 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
69940
69941+ pax_track_stack();
69942+
69943 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
69944 htc->ht_supported ? "" : "not ");
69945 if (htc->ht_supported) {
69946diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
69947--- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
69948+++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
69949@@ -27,6 +27,7 @@
69950 #include <net/ieee80211_radiotap.h>
69951 #include <net/cfg80211.h>
69952 #include <net/mac80211.h>
69953+#include <asm/local.h>
69954 #include "key.h"
69955 #include "sta_info.h"
69956
69957@@ -721,7 +722,7 @@ struct ieee80211_local {
69958 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69959 spinlock_t queue_stop_reason_lock;
69960
69961- int open_count;
69962+ local_t open_count;
69963 int monitors, cooked_mntrs;
69964 /* number of interfaces with corresponding FIF_ flags */
69965 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
69966diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
69967--- linux-3.0.4/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
69968+++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
69969@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
69970 break;
69971 }
69972
69973- if (local->open_count == 0) {
69974+ if (local_read(&local->open_count) == 0) {
69975 res = drv_start(local);
69976 if (res)
69977 goto err_del_bss;
69978@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
69979 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
69980
69981 if (!is_valid_ether_addr(dev->dev_addr)) {
69982- if (!local->open_count)
69983+ if (!local_read(&local->open_count))
69984 drv_stop(local);
69985 return -EADDRNOTAVAIL;
69986 }
69987@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
69988 mutex_unlock(&local->mtx);
69989
69990 if (coming_up)
69991- local->open_count++;
69992+ local_inc(&local->open_count);
69993
69994 if (hw_reconf_flags) {
69995 ieee80211_hw_config(local, hw_reconf_flags);
69996@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
69997 err_del_interface:
69998 drv_remove_interface(local, &sdata->vif);
69999 err_stop:
70000- if (!local->open_count)
70001+ if (!local_read(&local->open_count))
70002 drv_stop(local);
70003 err_del_bss:
70004 sdata->bss = NULL;
70005@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
70006 }
70007
70008 if (going_down)
70009- local->open_count--;
70010+ local_dec(&local->open_count);
70011
70012 switch (sdata->vif.type) {
70013 case NL80211_IFTYPE_AP_VLAN:
70014@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
70015
70016 ieee80211_recalc_ps(local, -1);
70017
70018- if (local->open_count == 0) {
70019+ if (local_read(&local->open_count) == 0) {
70020 if (local->ops->napi_poll)
70021 napi_disable(&local->napi);
70022 ieee80211_clear_tx_pending(local);
70023diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
70024--- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
70025+++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
70026@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
70027 local->hw.conf.power_level = power;
70028 }
70029
70030- if (changed && local->open_count) {
70031+ if (changed && local_read(&local->open_count)) {
70032 ret = drv_config(local, changed);
70033 /*
70034 * Goal:
70035diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
70036--- linux-3.0.4/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
70037+++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
70038@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
70039 bool have_higher_than_11mbit = false;
70040 u16 ap_ht_cap_flags;
70041
70042+ pax_track_stack();
70043+
70044 /* AssocResp and ReassocResp have identical structure */
70045
70046 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
70047diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
70048--- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
70049+++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
70050@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
70051 cancel_work_sync(&local->dynamic_ps_enable_work);
70052 del_timer_sync(&local->dynamic_ps_timer);
70053
70054- local->wowlan = wowlan && local->open_count;
70055+ local->wowlan = wowlan && local_read(&local->open_count);
70056 if (local->wowlan) {
70057 int err = drv_suspend(local, wowlan);
70058 if (err) {
70059@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
70060 }
70061
70062 /* stop hardware - this must stop RX */
70063- if (local->open_count)
70064+ if (local_read(&local->open_count))
70065 ieee80211_stop_device(local);
70066
70067 suspend:
70068diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
70069--- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
70070+++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
70071@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
70072
70073 ASSERT_RTNL();
70074
70075- if (local->open_count)
70076+ if (local_read(&local->open_count))
70077 return -EBUSY;
70078
70079 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
70080diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
70081--- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
70082+++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
70083@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
70084
70085 spin_unlock_irqrestore(&events->lock, status);
70086
70087- if (copy_to_user(buf, pb, p))
70088+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
70089 return -EFAULT;
70090
70091 return p;
70092diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
70093--- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
70094+++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
70095@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
70096 #endif
70097
70098 /* restart hardware */
70099- if (local->open_count) {
70100+ if (local_read(&local->open_count)) {
70101 /*
70102 * Upon resume hardware can sometimes be goofy due to
70103 * various platform / driver / bus issues, so restarting
70104diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
70105--- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
70106+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
70107@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
70108 /* Increase the refcnt counter of the dest */
70109 atomic_inc(&dest->refcnt);
70110
70111- conn_flags = atomic_read(&dest->conn_flags);
70112+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
70113 if (cp->protocol != IPPROTO_UDP)
70114 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
70115 /* Bind with the destination and its corresponding transmitter */
70116@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
70117 atomic_set(&cp->refcnt, 1);
70118
70119 atomic_set(&cp->n_control, 0);
70120- atomic_set(&cp->in_pkts, 0);
70121+ atomic_set_unchecked(&cp->in_pkts, 0);
70122
70123 atomic_inc(&ipvs->conn_count);
70124 if (flags & IP_VS_CONN_F_NO_CPORT)
70125@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
70126
70127 /* Don't drop the entry if its number of incoming packets is not
70128 located in [0, 8] */
70129- i = atomic_read(&cp->in_pkts);
70130+ i = atomic_read_unchecked(&cp->in_pkts);
70131 if (i > 8 || i < 0) return 0;
70132
70133 if (!todrop_rate[i]) return 0;
70134diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
70135--- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
70136+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
70137@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
70138 ret = cp->packet_xmit(skb, cp, pd->pp);
70139 /* do not touch skb anymore */
70140
70141- atomic_inc(&cp->in_pkts);
70142+ atomic_inc_unchecked(&cp->in_pkts);
70143 ip_vs_conn_put(cp);
70144 return ret;
70145 }
70146@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
70147 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
70148 pkts = sysctl_sync_threshold(ipvs);
70149 else
70150- pkts = atomic_add_return(1, &cp->in_pkts);
70151+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
70152
70153 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
70154 cp->protocol == IPPROTO_SCTP) {
70155diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
70156--- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
70157+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
70158@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
70159 ip_vs_rs_hash(ipvs, dest);
70160 write_unlock_bh(&ipvs->rs_lock);
70161 }
70162- atomic_set(&dest->conn_flags, conn_flags);
70163+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
70164
70165 /* bind the service */
70166 if (!dest->svc) {
70167@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
70168 " %-7s %-6d %-10d %-10d\n",
70169 &dest->addr.in6,
70170 ntohs(dest->port),
70171- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
70172+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
70173 atomic_read(&dest->weight),
70174 atomic_read(&dest->activeconns),
70175 atomic_read(&dest->inactconns));
70176@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
70177 "%-7s %-6d %-10d %-10d\n",
70178 ntohl(dest->addr.ip),
70179 ntohs(dest->port),
70180- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
70181+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
70182 atomic_read(&dest->weight),
70183 atomic_read(&dest->activeconns),
70184 atomic_read(&dest->inactconns));
70185@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
70186 struct ip_vs_dest_user *udest_compat;
70187 struct ip_vs_dest_user_kern udest;
70188
70189+ pax_track_stack();
70190+
70191 if (!capable(CAP_NET_ADMIN))
70192 return -EPERM;
70193
70194@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
70195
70196 entry.addr = dest->addr.ip;
70197 entry.port = dest->port;
70198- entry.conn_flags = atomic_read(&dest->conn_flags);
70199+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
70200 entry.weight = atomic_read(&dest->weight);
70201 entry.u_threshold = dest->u_threshold;
70202 entry.l_threshold = dest->l_threshold;
70203@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
70204 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
70205
70206 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
70207- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
70208+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
70209 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
70210 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
70211 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
70212diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
70213--- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
70214+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
70215@@ -648,7 +648,7 @@ control:
70216 * i.e only increment in_pkts for Templates.
70217 */
70218 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
70219- int pkts = atomic_add_return(1, &cp->in_pkts);
70220+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
70221
70222 if (pkts % sysctl_sync_period(ipvs) != 1)
70223 return;
70224@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
70225
70226 if (opt)
70227 memcpy(&cp->in_seq, opt, sizeof(*opt));
70228- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
70229+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
70230 cp->state = state;
70231 cp->old_state = cp->state;
70232 /*
70233diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
70234--- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
70235+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
70236@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
70237 else
70238 rc = NF_ACCEPT;
70239 /* do not touch skb anymore */
70240- atomic_inc(&cp->in_pkts);
70241+ atomic_inc_unchecked(&cp->in_pkts);
70242 goto out;
70243 }
70244
70245@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
70246 else
70247 rc = NF_ACCEPT;
70248 /* do not touch skb anymore */
70249- atomic_inc(&cp->in_pkts);
70250+ atomic_inc_unchecked(&cp->in_pkts);
70251 goto out;
70252 }
70253
70254diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
70255--- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
70256+++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
70257@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
70258
70259 To compile it as a module, choose M here. If unsure, say N.
70260
70261+config NETFILTER_XT_MATCH_GRADM
70262+ tristate '"gradm" match support'
70263+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
70264+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
70265+ ---help---
70266+ The gradm match allows to match on grsecurity RBAC being enabled.
70267+ It is useful when iptables rules are applied early on bootup to
70268+ prevent connections to the machine (except from a trusted host)
70269+ while the RBAC system is disabled.
70270+
70271 config NETFILTER_XT_MATCH_HASHLIMIT
70272 tristate '"hashlimit" match support'
70273 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
70274diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
70275--- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
70276+++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
70277@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
70278 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
70279 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
70280 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
70281+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
70282 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
70283 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
70284 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
70285diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
70286--- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
70287+++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
70288@@ -70,7 +70,7 @@ struct nfulnl_instance {
70289 };
70290
70291 static DEFINE_SPINLOCK(instances_lock);
70292-static atomic_t global_seq;
70293+static atomic_unchecked_t global_seq;
70294
70295 #define INSTANCE_BUCKETS 16
70296 static struct hlist_head instance_table[INSTANCE_BUCKETS];
70297@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
70298 /* global sequence number */
70299 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
70300 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
70301- htonl(atomic_inc_return(&global_seq)));
70302+ htonl(atomic_inc_return_unchecked(&global_seq)));
70303
70304 if (data_len) {
70305 struct nlattr *nla;
70306diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
70307--- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
70308+++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
70309@@ -58,7 +58,7 @@ struct nfqnl_instance {
70310 */
70311 spinlock_t lock;
70312 unsigned int queue_total;
70313- atomic_t id_sequence; /* 'sequence' of pkt ids */
70314+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
70315 struct list_head queue_list; /* packets in queue */
70316 };
70317
70318@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
70319 nfmsg->version = NFNETLINK_V0;
70320 nfmsg->res_id = htons(queue->queue_num);
70321
70322- entry->id = atomic_inc_return(&queue->id_sequence);
70323+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
70324 pmsg.packet_id = htonl(entry->id);
70325 pmsg.hw_protocol = entskb->protocol;
70326 pmsg.hook = entry->hook;
70327@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
70328 inst->peer_pid, inst->queue_total,
70329 inst->copy_mode, inst->copy_range,
70330 inst->queue_dropped, inst->queue_user_dropped,
70331- atomic_read(&inst->id_sequence), 1);
70332+ atomic_read_unchecked(&inst->id_sequence), 1);
70333 }
70334
70335 static const struct seq_operations nfqnl_seq_ops = {
70336diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
70337--- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
70338+++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
70339@@ -0,0 +1,51 @@
70340+/*
70341+ * gradm match for netfilter
70342